import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or a library name, resolved via importlib) against a requirement."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed torch version against a reference version."""
    return compare_versions(torch_version, operation, version)
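
# A minimal usage sketch (an illustrative assumption, not part of the original file);
# STR_OPERATION_TO_FUNC is expected to map operator strings such as ">=" to functions
# like operator.ge:
#
#     if is_torch_version(">=", "1.12.0"):
#         ...  # torch satisfies the requirement
#     if compare_versions("numpy", ">=", "1.20.0"):
#         ...  # any installed library can be checked by name, too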
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin


@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of PriorTransformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Returns all attention processors used in the model, indexed by their weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Sets the attention processor: a single processor for all layers, or a dict keyed per layer."""
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default one."""
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)

        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        """Un-normalizes prior latents back into CLIP embedding space using the learned mean/std."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
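
# A minimal shape-check sketch (an illustrative assumption, not part of the original file):
#
#     model = PriorTransformer(num_attention_heads=2, attention_head_dim=12, embedding_dim=32, num_layers=1)
#     out = model(
#         hidden_states=torch.randn(1, 32),              # noisy CLIP image embedding
#         timestep=0,
#         proj_embedding=torch.randn(1, 32),             # pooled CLIP text embedding
#         encoder_hidden_states=torch.randn(1, 77, 32),  # CLIP text hidden states
#     )
#     assert out.predicted_image_embedding.shape == (1, 32)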
from manim import *


class Stage3(Scene):
    # The original class name was lost in the dump; "Stage3" follows the naming used by
    # accelerate's big-model-inference manim animations, which this scene appears to be from.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_2 = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_2, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_2))

        step_3 = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_3.move_to([2, 2, 0])

        self.play(Write(step_3, run_time=3))
        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )
        self.wait()
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of n by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check whether all elements of a list are equal (an empty list counts as equal)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first of n consecutive integers that each have n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
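
# Known results (Project Euler 47): solution(3) == 644, since 644 = 2^2 * 7 * 23,
# 645 = 3 * 5 * 43 and 646 = 2 * 17 * 19 each have three distinct prime factors;
# solution(4) == 134043.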
from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig


UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """A convolution block (conv + batch norm + ReLU) used throughout the UperNet heads."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM): pools at several scales and upsamples back to the input size."""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """Unified Perceptual Parsing decode head, combining a PPM over the top feature map with an FPN."""

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """Fully convolutional auxiliary head applied to one intermediate backbone feature map."""

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split fused qkv projections into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original AST weights into the 🤗 Audio Spectrogram Transformer structure."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
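
# With the lazy structure above, `from transformers.models.wavlm import WavLMModel` resolves
# WavLMModel only on first attribute access, so importing the package does not pull in the
# torch-dependent modeling code up front (a sketch of the intended behavior of _LazyModule).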
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding [CLS] and [SEP] tokens around one or two sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs: 0 for the first sequence (plus specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
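
# A hypothetical usage sketch (MobileNetV1Model is assumed to live alongside this config
# in the same model family; not part of this file):
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     model = MobileNetV1Model(config)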
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
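
# Worked example of the derived sizes under the defaults above: cardinality falls back to
# [0], so embedding_dimension = [min(50, (0 + 1) // 2)] = [0]; _number_of_features is then
# 0 + 0 + 0 + 0 + 1 * 2 = 2, and feature_size = input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.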
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Traverse the graph from the source vertex, recording each node's BFS-tree parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source vertex to target_vertex as 'A->B->C'."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
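
# Expected output for the example above (BFS tree rooted at "G"):
#     G->C->A->B->D
#     G
#     ValueError: No path from vertex: G to vertex: Foo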
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: returns x where x > 0, and alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
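
# Worked example (values follow directly from the definition): for x = -2 and alpha = 0.3,
# 0.3 * (exp(-2) - 1) ≈ -0.2594, so
#     exponential_linear_unit(np.array([2.3, 0.6, -2, -3.8]), 0.3)
# ≈ array([ 2.3, 0.6, -0.2594, -0.2933])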
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        # The record is truncated at this point in the source dump; the method body is cut off.
        ...
'''simple docstring'''
__a = torch_device == """cpu"""
__a = True
__a = False
self._test_inference_batch_single_identical(
test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
@skip_mps
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = torch_device == """cpu"""
__a = False
self._test_attention_slicing_forward_pass(
test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
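# Outside this test harness the prior pipeline is normally loaded from a published
# checkpoint instead of the tiny dummy components above. A minimal sketch (the
# checkpoint id below is an assumption, not something this file pins):
# pipe = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
# out = pipe("a portrait of a cat", num_inference_steps=25)
# image_embeds, negative_image_embeds = out.image_embeds, out.negative_image_embeds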
| 302
| 1
|
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any]=False ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
__a = os.path.abspath(_SCREAMING_SNAKE_CASE )
logger.info(f"Loading PyTorch weights from {pt_path}" )
__a = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
__a = convert_pytorch_state_dict_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__a = convert_pytorch_sharded_state_dict_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return flax_state_dict
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple[str] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Dict[str, jnp.ndarray] , _SCREAMING_SNAKE_CASE : str , ):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE : Tuple[str] ) -> bool:
return len(set(_SCREAMING_SNAKE_CASE ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__a = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__a = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__a = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__a = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__a = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
__a = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__a = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
__a = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__a = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__a = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__a = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__a = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__a = pt_tuple_key[-2] + """_v"""
if name is not None:
__a = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
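# Illustrative examples of the renaming rules above (a sketch; the key tuples are
# hypothetical and the tensors are assumed to be numpy arrays):
#   ("encoder", "layer_norm", "weight")       -> ("encoder", "layer_norm", "scale")
#   ("encoder", "conv", "weight"), 4-D tensor -> ("encoder", "conv", "kernel"), transposed to (H, W, in, out)
#   ("encoder", "dense", "weight"), 2-D tensor-> ("encoder", "dense", "kernel"), transposed
#   ("bn", "running_mean")                    -> ("bn", "mean")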
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
__a = {k: v.numpy() for k, v in pt_state_dict.items()}
__a = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__a = flax_model.params["""params"""]
else:
__a = flax_model.params
__a = flatten_dict(_SCREAMING_SNAKE_CASE )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__a = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(_SCREAMING_SNAKE_CASE )
__a = {}
__a = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
__a = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__a = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
__a = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__a = pt_tuple_key[1:]
# Correctly rename weight parameters
__a , __a = rename_key_and_reshape_tensor(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# add model prefix if necessary
__a = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__a = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__a = jnp.asarray(_SCREAMING_SNAKE_CASE )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
# also add unexpected weight so that warning is thrown
__a = jnp.asarray(_SCREAMING_SNAKE_CASE )
else:
# also add unexpected weight so that warning is thrown
__a = jnp.asarray(_SCREAMING_SNAKE_CASE )
return unflatten_dict(_SCREAMING_SNAKE_CASE )
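# For reference, the flatten/unflatten helpers used above turn nested parameter trees
# into tuple-keyed dicts and back; a small sketch:
# >>> from flax.traverse_util import flatten_dict, unflatten_dict
# >>> flat = flatten_dict({"encoder": {"dense": {"kernel": 0}}})
# >>> flat
# {('encoder', 'dense', 'kernel'): 0}
# >>> unflatten_dict(flat) == {"encoder": {"dense": {"kernel": 0}}}
# True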
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
import torch
# Load the index
__a = {}
for shard_file in shard_filenames:
# load using msgpack utils
__a = torch.load(_SCREAMING_SNAKE_CASE )
__a = {k: v.numpy() for k, v in pt_state_dict.items()}
__a = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__a = flax_model.params["""params"""]
__a = flatten_dict(_SCREAMING_SNAKE_CASE )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
__a = flax_model.params
__a = flatten_dict(_SCREAMING_SNAKE_CASE )
__a = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
__a = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__a = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
__a = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__a = pt_tuple_key[1:]
# Correctly rename weight parameters
__a , __a = rename_key_and_reshape_tensor(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# add model prefix if necessary
__a = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__a = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__a = jnp.asarray(_SCREAMING_SNAKE_CASE )
continue
if "var" in flax_key[-1]:
__a = jnp.asarray(_SCREAMING_SNAKE_CASE )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
# also add unexpected weight so that warning is thrown
__a = jnp.asarray(_SCREAMING_SNAKE_CASE )
else:
# also add unexpected weight so that warning is thrown
__a = jnp.asarray(_SCREAMING_SNAKE_CASE )
return unflatten_dict(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = os.path.abspath(_SCREAMING_SNAKE_CASE )
logger.info(f"Loading Flax weights from {flax_checkpoint_path}" )
# import correct flax class
__a = getattr(_SCREAMING_SNAKE_CASE , """Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(_SCREAMING_SNAKE_CASE , """rb""" ) as state_f:
try:
__a = from_bytes(_SCREAMING_SNAKE_CASE , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
__a = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , _SCREAMING_SNAKE_CASE ) ).values()
if any(_SCREAMING_SNAKE_CASE ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
__a = jax.tree_util.tree_map(
lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , _SCREAMING_SNAKE_CASE )
__a = flatten_dict(_SCREAMING_SNAKE_CASE )
__a = pt_model.state_dict()
__a = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
__a = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__a = []
__a = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__a = flax_key_tuple[0] == pt_model.base_model_prefix
__a = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__a = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__a = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_SCREAMING_SNAKE_CASE ) not in pt_model_dict:
# conv layer
__a = flax_key_tuple[:-1] + ("""weight""",)
__a = jnp.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_SCREAMING_SNAKE_CASE ) not in pt_model_dict:
# linear layer
__a = flax_key_tuple[:-1] + ("""weight""",)
__a = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__a = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__a = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
__a = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
__a = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__a = """.""".join(_SCREAMING_SNAKE_CASE )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__a = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__a = key.split(""".""" )
__a = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__a = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
__a = key_components[-2] + """_v"""
if name is not None:
__a = key_components[:-3] + [name]
__a = """.""".join(_SCREAMING_SNAKE_CASE )
__a = key
if flax_key in special_pt_names:
__a = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
__a = np.asarray(_SCREAMING_SNAKE_CASE ) if not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
__a = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# remove from missing keys
missing_keys.remove(_SCREAMING_SNAKE_CASE )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_SCREAMING_SNAKE_CASE )
pt_model.load_state_dict(_SCREAMING_SNAKE_CASE )
# re-transform missing_keys to list
__a = list(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(_SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
else:
logger.warning(
f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
"""If your task is similar to the task the model of the checkpoint was trained on, """
f"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
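# The kernel-layout conversion performed above, in one line (a sketch): Flax stores
# convolution kernels as (H, W, in, out) while PyTorch expects (out, in, H, W), hence
# the (3, 2, 0, 1) transpose; dense kernels are simply transposed.
# >>> import numpy as np
# >>> np.zeros((3, 3, 16, 32)).transpose(3, 2, 0, 1).shape
# (32, 16, 3, 3)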
| 302
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = Dict[str, Any]
lowerCamelCase__ = List[Prediction]
@add_end_docstrings(lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ):
'''simple docstring'''
super().__init__(*__lowercase , **__lowercase )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def UpperCamelCase_ ( self : Optional[int] , **__lowercase : List[str] ):
'''simple docstring'''
__a = {}
if "threshold" in kwargs:
__a = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self : List[Any] , *__lowercase : Any , **__lowercase : Tuple ):
'''simple docstring'''
return super().__call__(*__lowercase , **__lowercase )
def UpperCamelCase_ ( self : str , __lowercase : Tuple ):
'''simple docstring'''
__a = load_image(__lowercase )
__a = torch.IntTensor([[image.height, image.width]] )
__a = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
__a = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
__a = target_size
return inputs
def UpperCamelCase_ ( self : Dict , __lowercase : List[str] ):
'''simple docstring'''
__a = model_inputs.pop("""target_size""" )
__a = self.model(**__lowercase )
__a = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
__a = model_inputs["""bbox"""]
return model_outputs
def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any]=0.9 ):
'''simple docstring'''
__a = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
height , width = target_size[0].tolist()
def unnormalize(__lowercase : Optional[Any] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
scores , classes = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__a = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
__a = [unnormalize(bbox ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
__a = ["""score""", """label""", """box"""]
__a = [dict(zip(__lowercase , __lowercase ) ) for vals in zip(scores.tolist() , __lowercase , __lowercase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__a = self.image_processor.post_process_object_detection(__lowercase , __lowercase , __lowercase )
__a = raw_annotations[0]
__a = raw_annotation["""scores"""]
__a = raw_annotation["""labels"""]
__a = raw_annotation["""boxes"""]
__a = scores.tolist()
__a = [self.model.config.id2label[label.item()] for label in labels]
__a = [self._get_bounding_box(__lowercase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__a = ["""score""", """label""", """box"""]
__a = [
dict(zip(__lowercase , __lowercase ) )
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
]
return annotation
def UpperCamelCase_ ( self : Optional[int] , __lowercase : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
__a , __a , __a , __a = box.int().tolist()
__a = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
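# A typical invocation of this pipeline (a sketch; the checkpoint and image URL are
# illustrative, taken from common transformers examples, and not pinned by this file):
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]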
| 302
| 1
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.dummy_uncond_unet
__a = ScoreSdeVeScheduler()
__a = ScoreSdeVePipeline(unet=__lowercase , scheduler=__lowercase )
sde_ve.to(__lowercase )
sde_ve.set_progress_bar_config(disable=__lowercase )
__a = torch.manual_seed(0 )
__a = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__lowercase ).images
__a = torch.manual_seed(0 )
__a = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__lowercase , return_dict=__lowercase )[
0
]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = """google/ncsnpp-church-256"""
__a = UNet2DModel.from_pretrained(__lowercase )
__a = ScoreSdeVeScheduler.from_pretrained(__lowercase )
__a = ScoreSdeVePipeline(unet=__lowercase , scheduler=__lowercase )
sde_ve.to(__lowercase )
sde_ve.set_progress_bar_config(disable=__lowercase )
__a = torch.manual_seed(0 )
__a = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__lowercase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__a = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
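# Outside the test harness the same pipeline runs in a few lines (a sketch; the
# checkpoint is the one exercised by the slow test above):
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
# image = pipe(num_inference_steps=10, output_type="numpy").images[0]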
| 302
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
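# The _LazyModule registered above defers the heavy imports until attribute access;
# a sketch of the effect (module path assumed from the import structure above):
# import transformers.models.efficientnet as effnet   # cheap: nothing imported yet
# effnet.EfficientNetConfig                           # triggers configuration_efficientnet
# effnet.EfficientNetModel                            # triggers modeling_efficientnet (needs torch)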
| 302
| 1
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =CustomTokenizer
pass
| 302
|
import random
def _partition( data : list , pivot ):
    """simple docstring"""
    less , equal , greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select( items : list , index : int ):
    """simple docstring"""
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    smaller , equal , larger = _partition(items , pivot )
    count = len(equal )
    m = len(smaller )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
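# A quick check of the 0-indexed selection above (illustrative):
# >>> quick_select([2, 4, 5, 7, 899, 54, 32], 5)   # 6th smallest
# 54
# >>> quick_select([2, 4, 5, 7, 899, 54, 32], 0)   # minimum
# 2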
| 302
| 1
|
def match_pattern( input_string : str , pattern : str ):
    """simple docstring"""
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
input_string = """aab"""
pattern = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
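# More illustrative cases of the DP matcher above:
# >>> match_pattern("aab", "c*a*b")   # 'c*' matches empty, 'a*' absorbs "aa"
# True
# >>> match_pattern("aaa", "aa")      # no '*' to absorb the extra 'a'
# False
# >>> match_pattern("abc", "a.c")     # '.' matches any single character
# True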
| 302
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Optional[int] , **__lowercase : Dict ):
'''simple docstring'''
super().__init__(**__lowercase )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self : str , __lowercase : Union[np.ndarray, bytes, str] , **__lowercase : int ):
'''simple docstring'''
return super().__call__(__lowercase , **__lowercase )
def UpperCamelCase_ ( self : List[Any] , **__lowercase : Union[str, Any] ):
'''simple docstring'''
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__a = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCamelCase_ ( self : int , __lowercase : Dict , __lowercase : Dict=None , __lowercase : str="This is a sound of {}." ):
'''simple docstring'''
if isinstance(__lowercase , __lowercase ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(__lowercase ).content
else:
with open(__lowercase , """rb""" ) as f:
__a = f.read()
if isinstance(__lowercase , __lowercase ):
__a = ffmpeg_read(__lowercase , self.feature_extractor.sampling_rate )
if not isinstance(__lowercase , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
__a = candidate_labels
__a = [hypothesis_template.format(__lowercase ) for x in candidate_labels]
__a = self.tokenizer(__lowercase , return_tensors=self.framework , padding=__lowercase )
__a = [text_inputs]
return inputs
def UpperCamelCase_ ( self : Any , __lowercase : Any ):
'''simple docstring'''
__a = model_inputs.pop("""candidate_labels""" )
__a = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , __lowercase ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**__lowercase , **__lowercase )
__a = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Dict ):
'''simple docstring'''
candidate_labels = model_outputs.pop("""candidate_labels""" )
logits = model_outputs["""logits"""][0]
if self.framework == "pt":
    probs = logits.softmax(dim=0 )
    scores = probs.tolist()
else:
    raise ValueError("""`tf` framework not supported.""" )
result = [
    {"""score""": score, """label""": candidate_label}
    for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
]
return result
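# A typical call to this pipeline (a sketch; the CLAP checkpoint is illustrative and
# not pinned by this file):
# from transformers import pipeline
# classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])
# -> [{"score": 0.99, "label": "dog barking"}, {"score": 0.01, "label": "vacuum cleaner"}]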
| 302
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[int] =ReformerTokenizer
__lowerCamelCase : Optional[Any] =ReformerTokenizerFast
__lowerCamelCase : List[str] =True
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : int =True
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
super().setUp()
__a = ReformerTokenizer(__lowercase , keep_accents=__lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = """<s>"""
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__lowercase ) , 1000 )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = """I was born in 92000, and this is falsé."""
__a = tokenizer.tokenize(__lowercase )
__a = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__a = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__a = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(__lowercase )
__a = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCamelCase_ ( self : List[str] , __lowercase : Dict=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__a = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase )
# Simple input
__a = """This is a simple input"""
__a = ["""This is a simple input 1""", """This is a simple input 2"""]
__a = ("""This is a simple input""", """This is a pair""")
__a = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Simple input
self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Simple input
self.assertRaises(
__lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" , )
# Pair input
self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Pair input
self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Pair input
self.assertRaises(
__lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" , )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = ReformerTokenizer(__lowercase , keep_accents=__lowercase )
__a = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [285, 46, 10, 170, 382] , )
__a = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__a = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(
__lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__a = tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = """Hello World!"""
__a = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__a = [108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265]  # fmt: skip
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
@require_torch
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
__a = list(self.big_tokenizer.get_vocab().keys() )[:10]
__a = """ """.join(__lowercase )
__a = self.big_tokenizer.encode_plus(__lowercase , return_tensors="""pt""" )
__a = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
__a = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
__a = encoded_sequence["""input_ids"""].shape
__a = ReformerModel(__lowercase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowercase )
model(**__lowercase )
@slow
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
# fmt: off
__a = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
__a = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=__lowercase , sequences=__lowercase , )
| 302
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Dict =['pixel_values']
def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Dict , ):
'''simple docstring'''
super().__init__(**__lowercase )
__a = size if size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase )
__a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase , default_to_square=__lowercase , param_name="""crop_size""" )
__a = do_resize
__a = do_rescale
__a = do_normalize
__a = do_center_crop
__a = crop_size
__a = size
__a = resample
__a = rescale_factor
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "shortest_edge" in size:
__a = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__a = (size["""height"""], size["""width"""])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : str , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str ):
'''simple docstring'''
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ):
'''simple docstring'''
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Tuple , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : int = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = do_resize if do_resize is not None else self.do_resize
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(__lowercase , param_name="""crop_size""" , default_to_square=__lowercase )
__a = resample if resample is not None else self.resample
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(__lowercase )
if not is_batched(__lowercase ):
__a = [images]
if not valid_images(__lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__a = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
__a = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
__a = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
__a = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
__a = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__a = {"""pixel_values""": images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
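# A minimal round trip through the processor above (a sketch with a random image; the
# shapes follow from the 224x224 defaults and channel-first output):
# import numpy as np
# processor = SCREAMING_SNAKE_CASE()  # defaults: resize + center-crop to 224, rescale, normalize
# batch = processor(images=np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8), return_tensors="np")
# batch["pixel_values"].shape  # (1, 3, 224, 224)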
| 302
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : str ='distilbert'
__lowerCamelCase : List[str] ={
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self : str , __lowercase : Optional[Any]=30522 , __lowercase : Optional[int]=512 , __lowercase : Union[str, Any]=False , __lowercase : Tuple=6 , __lowercase : Optional[Any]=12 , __lowercase : int=768 , __lowercase : Optional[Any]=4 * 768 , __lowercase : List[str]=0.1 , __lowercase : int=0.1 , __lowercase : List[Any]="gelu" , __lowercase : Union[str, Any]=0.02 , __lowercase : Any=0.1 , __lowercase : Any=0.2 , __lowercase : Optional[int]=0 , **__lowercase : Dict , ):
'''simple docstring'''
__a = vocab_size
__a = max_position_embeddings
__a = sinusoidal_pos_embds
__a = n_layers
__a = n_heads
__a = dim
__a = hidden_dim
__a = dropout
__a = attention_dropout
__a = activation
__a = initializer_range
__a = qa_dropout
__a = seq_classif_dropout
super().__init__(**__lowercase , pad_token_id=__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__a = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__a = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
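# The attribute_map above aliases BERT-style names onto DistilBERT's own fields via
# PretrainedConfig attribute resolution (a sketch):
# config = SCREAMING_SNAKE_CASE()   # the DistilBERT config class defined above
# config.dim                        # 768
# config.hidden_size                # 768 as well, through the 'hidden_size' -> 'dim' alias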
| 302
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = """hf-internal-testing/tiny-random-t5"""
__a = AutoTokenizer.from_pretrained(__lowercase )
__a = AutoModelForSeq2SeqLM.from_pretrained(__lowercase )
__a = tokenizer("""This is me""" , return_tensors="""pt""" )
__a = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__a = model.generate(**__lowercase )
__a = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
__a = AutoModelForSeq2SeqLM.from_pretrained(__lowercase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__a = model_reloaded.generate(**__lowercase )
self.assertTrue(torch.allclose(__lowercase , __lowercase ) )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = """hf-internal-testing/tiny-random-t5"""
__a = AutoModelForSeq2SeqLM.from_pretrained(__lowercase )
__a = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowercase ):
model.save_pretrained(__lowercase )
__a = model.reverse_bettertransformer()
model.save_pretrained(__lowercase )
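# The round trip exercised by these tests, outside unittest (a sketch; the output
# directory name is illustrative):
# model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
# model = model.to_bettertransformer()        # swap attention for fused kernels
# model = model.reverse_bettertransformer()   # restore canonical modules before saving
# model.save_pretrained("t5-tiny-roundtrip")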
| 302
| 1
|
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self : Dict , __lowercase : Optional[int] , __lowercase : Tuple=3 , __lowercase : List[Any]=7 , __lowercase : Any=True , __lowercase : Union[str, Any]=True , __lowercase : Optional[Any]=False , __lowercase : Dict=True , __lowercase : Any=99 , __lowercase : Optional[Any]=32 , __lowercase : Dict=5 , __lowercase : Dict=4 , __lowercase : List[Any]=37 , __lowercase : Union[str, Any]="gelu" , __lowercase : List[str]=0.1 , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=512 , __lowercase : List[str]=16 , __lowercase : Optional[Any]=2 , __lowercase : List[Any]=0.02 , __lowercase : Dict=3 , __lowercase : List[Any]=4 , __lowercase : int=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__lowercase , )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : List[str] ):
'''simple docstring'''
__a = FalconModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , attention_mask=__lowercase )
__a = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[Any] , __lowercase : Dict , ):
'''simple docstring'''
__a = True
__a = FalconModel(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__a = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , )
__a = model(__lowercase , attention_mask=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : List[str] , __lowercase : Dict , ):
'''simple docstring'''
__a = FalconForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Dict , __lowercase : List[Any] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Dict , __lowercase : int , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : Optional[int] , ):
'''simple docstring'''
__a = True
__a = True
__a = FalconForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
# first forward pass
__a = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , use_cache=__lowercase , )
__a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , output_hidden_states=__lowercase , )["""hidden_states"""][0]
__a = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , past_key_values=__lowercase , output_hidden_states=__lowercase , )["""hidden_states"""][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-3 ) )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
        __a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[Any] =(
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCamelCase : Tuple =(FalconForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : Union[str, Any] =(
{
'feature-extraction': FalconModel,
'text-classification': FalconForSequenceClassification,
'text-generation': FalconForCausalLM,
'question-answering': FalconForQuestionAnswering,
'token-classification': FalconForTokenClassification,
'zero-shot': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Any =False
__lowerCamelCase : Optional[Any] =False
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = FalconModelTester(self )
__a = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a , *__a = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
__a = alibi
self.model_tester.create_and_check_model(__lowercase , *__lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = input_dict["""input_ids"""]
__a = input_ids.ne(1 ).to(__lowercase )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = FalconForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = """single_label_classification"""
__a = input_dict["""input_ids"""]
__a = input_ids.ne(1 ).to(__lowercase )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = FalconForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = input_dict["""input_ids"""]
__a = FalconForCausalLM(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , use_cache=__lowercase )
__a = input_ids.shape[0]
__a = model._convert_to_rw_cache(result.past_key_values )
__a = model._convert_cache_to_standard_format(__lowercase , __lowercase )
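        # RW-format caches fuse the batch and head dimensions (ndim == 3), while the
        # standard format keeps them separate (ndim == 4); the loop below checks that
        # converting to RW format and back is lossless.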
for layer in range(len(__lowercase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = """multi_label_classification"""
__a = input_dict["""input_ids"""]
__a = input_ids.ne(1 ).to(__lowercase )
__a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__a = FalconForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__lowercase , """use_cache""" ):
return
__a = model_class(__lowercase ).to(__lowercase )
if "use_cache" not in inputs:
__a = True
__a = model(**__lowercase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__a = (
getattr(__lowercase , """decoder_layers""" , __lowercase )
or getattr(__lowercase , """num_decoder_layers""" , __lowercase )
or config.num_hidden_layers
)
__a = getattr(__lowercase , """num_kv_heads""" , config.num_attention_heads )
__a = getattr(__lowercase , """d_model""" , config.hidden_size )
__a = embed_dim // num_attention_heads
__a = outputs["""past_key_values"""]
self.assertEqual(len(__lowercase ) , __lowercase )
__a , __a = inputs["""input_ids"""].shape
for i in range(__lowercase ):
if config.new_decoder_architecture:
__a = config.num_attention_heads
elif config.multi_query:
__a = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
__a = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
model.eval()
model.to(__lowercase )
__a = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__lowercase )
__a = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
__a = model.generate(**__lowercase , do_sample=__lowercase , max_new_tokens=19 )
__a = tokenizer.batch_decode(__lowercase )[0]
self.assertEqual(__lowercase , __lowercase )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__a = AutoTokenizer.from_pretrained(__lowercase )
__a = FalconForCausalLM.from_pretrained(__lowercase )
model.eval()
model.to(__lowercase )
__a = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__lowercase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__lowercase , do_sample=__lowercase , max_new_tokens=4 )
model.generate(**__lowercase , do_sample=__lowercase , max_new_tokens=4 )
model.generate(**__lowercase , num_beams=2 , max_new_tokens=4 )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__a = AutoTokenizer.from_pretrained(__lowercase )
__a = FalconForCausalLM.from_pretrained(__lowercase )
model.eval()
model.to(device=__lowercase )
__a = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__lowercase )
# Test results are the same with and without cache
__a = model.generate(**__lowercase , do_sample=__lowercase , max_new_tokens=20 , use_cache=__lowercase )
__a = model.generate(**__lowercase , do_sample=__lowercase , max_new_tokens=20 , use_cache=__lowercase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCamelCase__ = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] ='albert'
def __init__( self : Optional[Any] , __lowercase : Union[str, Any]=30000 , __lowercase : List[str]=128 , __lowercase : Optional[Any]=4096 , __lowercase : Dict=12 , __lowercase : Any=1 , __lowercase : Optional[Any]=64 , __lowercase : Any=16384 , __lowercase : Any=1 , __lowercase : Union[str, Any]="gelu_new" , __lowercase : List[str]=0 , __lowercase : int=0 , __lowercase : Dict=512 , __lowercase : str=2 , __lowercase : List[str]=0.02 , __lowercase : Union[str, Any]=1E-12 , __lowercase : int=0.1 , __lowercase : Any="absolute" , __lowercase : Optional[int]=0 , __lowercase : Dict=2 , __lowercase : Optional[Any]=3 , **__lowercase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
__a = vocab_size
__a = embedding_size
__a = hidden_size
__a = num_hidden_layers
__a = num_hidden_groups
__a = num_attention_heads
__a = inner_group_num
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = classifier_dropout_prob
__a = position_embedding_type
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__a = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__a = {0: """batch""", 1: """sequence"""}
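        # Axis 0 ("batch") and the final axis ("sequence", plus "choice" for multiple
        # choice) are declared dynamic so the exported ONNX graph accepts arbitrary
        # batch sizes and sequence lengths at inference time.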
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
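# Each try/except ladder above fails soft: when an optional backend (torch, flax,
# scipy, torchsde) is missing, the real schedulers are replaced by dummy objects
# that only raise an informative error if they are actually instantiated.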
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
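    # `_LazyModule` defers the heavy submodule imports declared above until an
    # attribute is first accessed, keeping the initial package import cheap even
    # when the TF and vision extras are installed.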
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Any =['keras_nlp']
def __init__( self : int , *__lowercase : Optional[int] , **__lowercase : Tuple ):
'''simple docstring'''
requires_backends(self , ["""keras_nlp"""] )
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        # Compare against `None` rather than truthiness so a root value of 0
        # is not silently overwritten.
        if self.val is not None:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # In-order traversal (left, root, right) visits BST keys in ascending
    # order, which is what makes tree sort work.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) by its truncated Maclaurin series:
    sin(x) = sum_{r=0}^{accuracy-1} (-1)^r * x^(2r+1) / (2r+1)!
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta modulo 2*pi first so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) by its truncated Maclaurin series:
    cos(x) = sum_{r=0}^{accuracy-1} (-1)^r * x^(2r) / (2r)!
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowercase , """width_multiplier""" ) )
class SCREAMING_SNAKE_CASE :
def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict=13 , __lowercase : int=64 , __lowercase : Tuple=2 , __lowercase : Tuple=3 , __lowercase : Tuple="swish" , __lowercase : List[Any]=3 , __lowercase : List[str]=32 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[int]=True , __lowercase : Dict=True , __lowercase : Tuple=10 , __lowercase : str=None , __lowercase : Optional[Any]=0.25 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple ):
'''simple docstring'''
__a = MobileViTVaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : Any , __lowercase : int , __lowercase : List[str] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[Any] =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCamelCase : Any =(
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Dict =False
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : int =False
__lowerCamelCase : Any =False
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__lowercase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(__lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[str] ):
__a = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(__lowercase ) , __lowercase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(__lowercase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__lowercase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__a = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = model.to(__lowercase )
__a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __lowercase )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=__lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = model.to(__lowercase )
__a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __lowercase )
__a = image_processor.post_process_semantic_segmentation(outputs=__lowercase )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __lowercase )
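        # Without `target_sizes` the post-processing keeps the raw logit resolution
        # (32x32 here); passing `target_sizes=[(50, 60)]` resizes each predicted
        # segmentation map to the requested shape instead.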
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
__a = multiprocessing.Manager()
__a = manager.list()
__a = multiprocessing.Process(target=_SCREAMING_SNAKE_CASE , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("""timed out""" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
__a = shutil.rmtree
__a = os.rmdir
__a = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
__a = {}
with swallow_io():
with time_limit(_SCREAMING_SNAKE_CASE ):
exec(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
result.append("""passed""" )
except TimeoutException:
result.append("""timed out""" )
except BaseException as e:
result.append(f"failed: {e}" )
# Needed for cleaning up.
__a = rmtree
__a = rmdir
__a = chdir
@contextlib.contextmanager
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
def signal_handler(_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
raise TimeoutException("""Timed out!""" )
signal.setitimer(signal.ITIMER_REAL , _SCREAMING_SNAKE_CASE )
signal.signal(signal.SIGALRM , _SCREAMING_SNAKE_CASE )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
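# Note: this SIGALRM-based timeout only works on the main thread of a Unix
# process; the `finally` block resets the interval timer so one program's
# timeout cannot leak into the next run.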
@contextlib.contextmanager
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = WriteOnlyStringIO()
with contextlib.redirect_stdout(_SCREAMING_SNAKE_CASE ):
with contextlib.redirect_stderr(_SCREAMING_SNAKE_CASE ):
with redirect_stdin(_SCREAMING_SNAKE_CASE ):
yield
@contextlib.contextmanager
def lowerCAmelCase__ ( ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as dirname:
with chdir(_SCREAMING_SNAKE_CASE ):
yield dirname
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
pass
class SCREAMING_SNAKE_CASE ( io.StringIO ):
def UpperCamelCase_ ( self : List[str] , *__lowercase : Any , **__lowercase : Tuple ):
'''simple docstring'''
raise OSError
def UpperCamelCase_ ( self : Any , *__lowercase : List[str] , **__lowercase : str ):
'''simple docstring'''
raise OSError
def UpperCamelCase_ ( self : int , *__lowercase : Tuple , **__lowercase : Dict ):
'''simple docstring'''
raise OSError
def UpperCamelCase_ ( self : Any , *__lowercase : List[Any] , **__lowercase : Dict ):
'''simple docstring'''
return False
class SCREAMING_SNAKE_CASE ( contextlib._RedirectStream ): # type: ignore
__lowerCamelCase : Tuple ='stdin'
@contextlib.contextmanager
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
if root == ".":
yield
return
__a = os.getcwd()
os.chdir(_SCREAMING_SNAKE_CASE )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict=None ):
"""simple docstring"""
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
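    # Every assignment to None below stubs out an os/shutil/subprocess/sys-level
    # helper (process control, file deletion, subprocess spawning, ...) so that
    # untrusted generated code cannot make destructive changes to the host.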
import builtins
__a = None
__a = None
import os
__a = """1"""
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
__a = None
import shutil
__a = None
__a = None
__a = None
import subprocess
__a = None # type: ignore
__a = None
import sys
__a = None
__a = None
__a = None
__a = None
__a = None
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Tuple ='openai/whisper-base'
__lowerCamelCase : Tuple =(
'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '
'transcribed text.'
)
__lowerCamelCase : str ='transcriber'
__lowerCamelCase : Any =WhisperProcessor
__lowerCamelCase : int =WhisperForConditionalGeneration
__lowerCamelCase : Optional[Any] =['audio']
__lowerCamelCase : Dict =['text']
def UpperCamelCase_ ( self : int , __lowercase : Dict ):
'''simple docstring'''
return self.pre_processor(__lowercase , return_tensors="""pt""" ).input_features
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] ):
'''simple docstring'''
return self.model.generate(inputs=__lowercase )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] ):
'''simple docstring'''
return self.pre_processor.batch_decode(__lowercase , skip_special_tokens=__lowercase )[0]
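# A minimal usage sketch, assuming the agents `PipelineTool` API: `__call__`
# chains encode -> forward -> decode, so invoking the instantiated tool on a raw
# 1-D float waveform returns the transcribed string directly.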
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        # det_inv is the modular multiplicative inverse of det modulo 36, found
        # by brute force; it exists because check_determinant guarantees det and
        # 36 are coprime.
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] ='perceiver'
def __init__( self : Dict , __lowercase : List[Any]=256 , __lowercase : List[Any]=1280 , __lowercase : Tuple=768 , __lowercase : str=1 , __lowercase : List[str]=26 , __lowercase : Any=8 , __lowercase : Dict=8 , __lowercase : Optional[Any]=None , __lowercase : str=None , __lowercase : Optional[int]="kv" , __lowercase : Dict=1 , __lowercase : Optional[int]=1 , __lowercase : List[str]="gelu" , __lowercase : List[str]=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Tuple=1E-12 , __lowercase : Optional[Any]=True , __lowercase : List[str]=262 , __lowercase : List[Any]=2048 , __lowercase : str=56 , __lowercase : str=[368, 496] , __lowercase : Dict=16 , __lowercase : Union[str, Any]=1920 , __lowercase : Tuple=16 , __lowercase : List[str]=[1, 16, 224, 224] , **__lowercase : str , ):
'''simple docstring'''
super().__init__(**__lowercase )
__a = num_latents
__a = d_latents
__a = d_model
__a = num_blocks
__a = num_self_attends_per_block
__a = num_self_attention_heads
__a = num_cross_attention_heads
__a = qk_channels
__a = v_channels
__a = cross_attention_shape_for_attention
__a = self_attention_widening_factor
__a = cross_attention_widening_factor
__a = hidden_act
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = use_query_residual
# masked language modeling attributes
__a = vocab_size
__a = max_position_embeddings
# image classification attributes
__a = image_size
# flow attributes
__a = train_size
# multimodal autoencoding attributes
__a = num_frames
__a = audio_samples_per_frame
__a = samples_per_patch
__a = output_shape
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
if self.task == "multiple-choice":
__a = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__a = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return 1E-4
def UpperCamelCase_ ( self : Any , __lowercase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __lowercase : int = -1 , __lowercase : int = -1 , __lowercase : int = -1 , __lowercase : bool = False , __lowercase : Optional[TensorType] = None , __lowercase : int = 3 , __lowercase : int = 40 , __lowercase : int = 40 , ):
'''simple docstring'''
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__lowercase , __lowercase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = preprocessor.num_special_tokens_to_add(__lowercase )
__a = compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowercase )
# Generate dummy inputs according to compute batch and sequence
__a = [""" """.join(["""a"""] ) * seq_length] * batch_size
__a = dict(preprocessor(__lowercase , return_tensors=__lowercase ) )
__a = inputs.pop("""input_ids""" )
return inputs
elif isinstance(__lowercase , __lowercase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(__lowercase , fixed_dimension=OnnxConfig.default_fixed_batch )
__a = self._generate_dummy_images(__lowercase , __lowercase , __lowercase , __lowercase )
__a = dict(preprocessor(images=__lowercase , return_tensors=__lowercase ) )
__a = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] ='autoformer'
__lowerCamelCase : str ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : List[Any] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase : bool = True , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 100 , __lowercase : float = 0.02 , __lowercase : bool = True , __lowercase : List[Any]=True , __lowercase : int = 10 , __lowercase : int = 25 , __lowercase : int = 3 , **__lowercase : Optional[int] , ):
'''simple docstring'''
# time series specific configuration
__a = prediction_length
__a = context_length if context_length is not None else prediction_length
__a = distribution_output
__a = loss
__a = input_size
__a = num_time_features
__a = lags_sequence
__a = scaling
__a = num_dynamic_real_features
__a = num_static_real_features
__a = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
__a = cardinality
else:
__a = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
__a = embedding_dimension
else:
__a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__a = num_parallel_samples
# Transformer architecture configuration
__a = input_size * len(self.lags_sequence ) + self._number_of_features
__a = d_model
__a = encoder_attention_heads
__a = decoder_attention_heads
__a = encoder_ffn_dim
__a = decoder_ffn_dim
__a = encoder_layers
__a = decoder_layers
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = activation_function
__a = init_std
__a = use_cache
# Autoformer
__a = label_length
__a = moving_average
__a = autocorrelation_factor
super().__init__(is_encoder_decoder=__lowercase , **__lowercase )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
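    # `feature_size` in __init__ is `input_size * len(lags_sequence)` plus this
    # property: each lagged copy of the target is concatenated with the static,
    # dynamic and time features and the two scaling statistics,
    # log1p(abs(loc)) and log(scale).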
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__lowerCamelCase : str =field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
__lowerCamelCase : ClassVar[Features] =Features({'text': Value('string' )} )
__lowerCamelCase : ClassVar[Features] =Features({'labels': ClassLabel} )
__lowerCamelCase : str ="text"
__lowerCamelCase : str ="labels"
def UpperCamelCase_ ( self : Any , __lowercase : str ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , __lowercase ):
raise ValueError(F"Column {self.label_column} is not a ClassLabel." )
__a = copy.deepcopy(self )
__a = self.label_schema.copy()
__a = features[self.label_column]
__a = label_schema
return task_template
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
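    # The alignment method above (`align_with_features` in the upstream datasets
    # API) swaps the generic ClassLabel placeholder in the template for the
    # dataset's concrete label feature, so the returned task template carries
    # the real class names after alignment.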
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
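# Note (illustrative, not part of the original file): with the lazy module installed in
# sys.modules, `from transformers.models.electra import ElectraModel` triggers the import
# of `modeling_electra` only on first attribute access, so merely importing the package
# stays cheap even when torch/tf/flax are all installed.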
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
    }
}
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation.""" )
        self.jieba = jieba
        self.translator = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        '''simple docstring'''
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ):
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls
    def get_special_tokens_mask( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is not None:
            return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1, 1]
        return ([0] * len(token_ids_a )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ):
        '''simple docstring'''
        text = super()._decode(*args , **kwargs )
        text = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text
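# Rough usage sketch (assumptions: a local `spiece.model` SentencePiece file and the
# `jieba` package are available; this mirrors how CPM-style tokenizers are typically
# driven, but it is not part of the original file):
#   tokenizer = SCREAMING_SNAKE_CASE("spiece.model")
#   segmented = " ".join(tokenizer.jieba.cut("你好,世界")).translate(tokenizer.translator)
#   pieces = tokenizer._tokenize(segmented)
#   print(tokenizer.convert_tokens_to_string(pieces))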
from __future__ import annotations
lowerCamelCase__ = """#"""
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ):
'''simple docstring'''
__a = {}
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in text:
if char not in trie:
__a = {}
__a = trie[char]
__a = True
def UpperCamelCase_ ( self : Tuple , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in prefix:
if char in trie:
__a = trie[char]
else:
return []
return self._elements(__lowercase )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : dict ):
'''simple docstring'''
__a = []
for c, v in d.items():
__a = [""" """] if c == END else [(c + s) for s in self._elements(__lowercase )]
result.extend(__lowercase )
return tuple(__lowercase )
lowerCamelCase__ = Trie()
lowerCamelCase__ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = trie.find_word(_SCREAMING_SNAKE_CASE )
return tuple(string + word for word in suffixes )
def lowerCAmelCase__ ( ):
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
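# Expected behaviour sketch: with the word list above, autocomplete_using_trie("de")
# returns the "de"-prefixed entries, i.e. ("depart ", "detergent ", "deer ", "deal ")
# (each completion carries the trailing sentinel space; order follows dict insertion).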
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config( mobilevit_name ):
    """simple docstring"""
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith("""deeplabv3_""" ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = """pascal-voc-id2label.json"""
    else:
        config.num_labels = 1000
        filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key( name , base_model=False ):
    """simple docstring"""
    for i in range(1 , 6 ):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}." , f"encoder.layer.{i - 1}." )
    if "conv_1." in name:
        name = name.replace("""conv_1.""" , """conv_stem.""" )
    if ".block." in name:
        name = name.replace(""".block.""" , """.""" )
    if "exp_1x1" in name:
        name = name.replace("""exp_1x1""" , """expand_1x1""" )
    if "red_1x1" in name:
        name = name.replace("""red_1x1""" , """reduce_1x1""" )
    if ".local_rep.conv_3x3." in name:
        name = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
    if ".local_rep.conv_1x1." in name:
        name = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
    if ".norm." in name:
        name = name.replace(""".norm.""" , """.normalization.""" )
    if ".conv." in name:
        name = name.replace(""".conv.""" , """.convolution.""" )
    if ".conv_proj." in name:
        name = name.replace(""".conv_proj.""" , """.conv_projection.""" )
    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}." , f".{i}.layer.{j}." )
    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}." , f".{i}." )
                if "expand_1x1" in name:
                    name = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
                if "conv_3x3" in name:
                    name = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
                if "reduce_1x1" in name:
                    name = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
    for i in range(2 , 5 ):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight" , """.layernorm.weight""" )
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias" , """.layernorm.bias""" )
    if ".global_rep." in name:
        name = name.replace(""".global_rep.""" , """.transformer.""" )
    if ".pre_norm_mha.0." in name:
        name = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
    if ".pre_norm_ffn.0." in name:
        name = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
    if ".pre_norm_ffn.1." in name:
        name = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
    if ".pre_norm_ffn.4." in name:
        name = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
    if ".transformer." in name:
        name = name.replace(""".transformer.""" , """.transformer.layer.""" )
    if ".aspp_layer." in name:
        name = name.replace(""".aspp_layer.""" , """.""" )
    if ".aspp_pool." in name:
        name = name.replace(""".aspp_pool.""" , """.""" )
    if "seg_head." in name:
        name = name.replace("""seg_head.""" , """segmentation_head.""" )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
    if "classifier.fc." in name:
        name = name.replace("""classifier.fc.""" , """classifier.""" )
    elif (not base_model) and ("segmentation_head." not in name):
        name = """mobilevit.""" + name
    return name
def convert_state_dict( orig_state_dict , model , base_model=False ):
    """simple docstring"""
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevit."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + """query.weight"""] = val[:dim, :]
                orig_state_dict[prefix + """key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[prefix + """value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[prefix + """query.bias"""] = val[:dim]
                orig_state_dict[prefix + """key.bias"""] = val[dim : dim * 2]
                orig_state_dict[prefix + """value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
    return orig_state_dict
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )
    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    if mobilevit_name.startswith("""deeplabv3_""" ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__a = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__a = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1e-4 )
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241] )
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587] )
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653] )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            """mobilevit_s""": """mobilevit-small""",
            """mobilevit_xs""": """mobilevit-x-small""",
            """mobilevit_xxs""": """mobilevit-xx-small""",
            """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
            """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
            """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
        }
        print("""Pushing to the hub...""" )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization="""apple""" )
        model.push_to_hub(model_name , organization="""apple""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
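# Example invocation (the script filename and paths below are placeholders):
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path /path/to/mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small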
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput( BaseOutput ):
    predicted_image_embedding: torch.FloatTensor
class SCREAMING_SNAKE_CASE ( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , num_attention_heads: int = 32 , attention_head_dim: int = 64 , num_layers: int = 20 , embedding_dim: int = 768 , num_embeddings=77 , additional_embeddings=4 , dropout: float = 0.0 , time_embed_act_fn: str = "silu" , norm_in_type: Optional[str] = None , embedding_proj_norm_type: Optional[str] = None , encoder_hid_proj_type: Optional[str] = "linear" , added_emb_type: Optional[str] = "prd" , time_embed_dim: Optional[int] = None , embedding_proj_dim: Optional[int] = None , clip_embed_dim: Optional[int] = None , ):
        '''simple docstring'''
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim , True , 0 )
        self.time_embedding = TimestepEmbedding(inner_dim , time_embed_dim , out_dim=inner_dim , act_fn=time_embed_act_fn )
        self.proj_in = nn.Linear(embedding_dim , inner_dim )
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_dim )
        else:
            raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
        self.embedding_proj = nn.Linear(embedding_proj_dim , inner_dim )
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim , inner_dim )
        else:
            raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
        self.positional_embedding = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , inner_dim ) )
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1 , 1 , inner_dim ) )
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , activation_fn="""gelu""" , attention_bias=True , )
                for d in range(num_layers )
            ] )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim )
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
        self.norm_out = nn.LayerNorm(inner_dim )
        self.proj_to_clip_embeddings = nn.Linear(inner_dim , clip_embed_dim )
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
        causal_attention_mask.triu_(1 )
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""" , causal_attention_mask , persistent=False )
        self.clip_mean = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
        self.clip_std = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ):
        '''simple docstring'''
        processors = {}
        def fn_recursive_add_processors(name: str , module: torch.nn.Module , processors: Dict[str, AttentionProcessor] ):
            if hasattr(module , """set_processor""" ):
                processors[F"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"{name}.{sub_name}" , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor( self , processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        '''simple docstring'''
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                F"A dict of processors was passed, but the number of processors {len(processor )} does not match the"
                F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
        def fn_recursive_attn_processor(name: str , module: torch.nn.Module , processor ):
            if hasattr(module , """set_processor""" ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(F"{name}.processor" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"{name}.{sub_name}" , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor( self ):
        '''simple docstring'''
        self.set_attn_processor(AttnProcessor() )
    def forward( self , hidden_states , timestep: Union[torch.Tensor, float, int] , proj_embedding: torch.FloatTensor , encoder_hidden_states: Optional[torch.FloatTensor] = None , attention_mask: Optional[torch.BoolTensor] = None , return_dict: bool = True , ):
        '''simple docstring'''
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size , dtype=timesteps.dtype , device=timesteps.device )
        timesteps_projected = self.time_proj(timesteps )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype )
        time_embeddings = self.time_embedding(timesteps_projected )
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding )
        proj_embeddings = self.embedding_proj(proj_embedding )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
        hidden_states = self.proj_in(hidden_states )
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype )
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape ) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype ).expand(batch_size , -1 , -1 )
            additional_embeds.append(prd_embedding )
        hidden_states = torch.cat(
            additional_embeds , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
            attention_mask = F.pad(attention_mask , (0, self.additional_embeddings) , value=0.0 )
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states )
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states , attention_mask=attention_mask )
        hidden_states = self.norm_out(hidden_states )
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding )
    def post_process_latents( self , prior_latents ):
        '''simple docstring'''
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
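# Shape sanity-check sketch (illustrative only; the tiny hyperparameters below are
# assumptions, not values from the original file):
#   prior = SCREAMING_SNAKE_CASE(num_attention_heads=2, attention_head_dim=4, num_layers=2,
#                                embedding_dim=8, num_embeddings=3, additional_embeddings=4)
#   out = prior(hidden_states=torch.randn(2, 8), timestep=1,
#               proj_embedding=torch.randn(2, 8), encoder_hidden_states=torch.randn(2, 3, 8))
#   assert out.predicted_image_embedding.shape == (2, 8)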
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""mgp-str""": 27}
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.vocab )
    def get_vocab( self ):
        '''simple docstring'''
        return dict(self.vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        '''simple docstring'''
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.decoder.get(index )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        return (vocab_file,)
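# Illustrative round trip (not in the original file; assumes a character-level
# vocab.json such as {"[GO]": 0, "s": 1, "t": 2, "r": 3}):
#   tok = SCREAMING_SNAKE_CASE("vocab.json")
#   ids = [tok._convert_token_to_id(c) for c in tok._tokenize("str")]  # -> [1, 2, 3]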
from functools import lru_cache
def unique_prime_factors( n: int ):
    """simple docstring"""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len( num: int ):
    """simple docstring"""
    return len(unique_prime_factors(num ) )
def equality( iterable: list ):
    """simple docstring"""
    return len(set(iterable ) ) in (0, 1)
def run( n: int ):
    """simple docstring"""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution( n: int = 4 ):
    """simple docstring"""
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
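# Worked example: for n=3 the first qualifying run is [644, 645, 646]
# (644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43, 646 = 2 * 17 * 19), and the default n=4
# yields 134043, the answer to Project Euler problem 47.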
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / """model_card_template.md"""
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def http_user_agent( user_agent: Union[Dict, str, None] = None ):
    """simple docstring"""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
def get_full_repo_name( model_id: str , organization: Optional[str] = None , token: Optional[str] = None ):
    """simple docstring"""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )["""name"""]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card( args , model_name ):
    """simple docstring"""
    if not is_jinja_available():
        raise ValueError(
            """Modelcard rendering is based on Jinja templates."""
            """ Please make sure to have `jinja` installed before using `create_model_card`."""
            """ To install it, please run `pip install Jinja2`.""" )
    if hasattr(args , """local_rank""" ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , """hub_token""" ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) ,
        template_path=MODEL_CARD_TEMPLATE_PATH ,
        model_name=model_name ,
        repo_name=repo_name ,
        dataset_name=args.dataset_name if hasattr(args , """dataset_name""" ) else None ,
        learning_rate=args.learning_rate ,
        train_batch_size=args.train_batch_size ,
        eval_batch_size=args.eval_batch_size ,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , """gradient_accumulation_steps""" ) else None
        ) ,
        adam_beta1=args.adam_beta1 if hasattr(args , """adam_beta1""" ) else None ,
        adam_beta2=args.adam_beta2 if hasattr(args , """adam_beta2""" ) else None ,
        adam_weight_decay=args.adam_weight_decay if hasattr(args , """adam_weight_decay""" ) else None ,
        adam_epsilon=args.adam_epsilon if hasattr(args , """adam_epsilon""" ) else None ,
        lr_scheduler=args.lr_scheduler if hasattr(args , """lr_scheduler""" ) else None ,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args , """lr_warmup_steps""" ) else None ,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args , """ema_inv_gamma""" ) else None ,
        ema_power=args.ema_power if hasattr(args , """ema_power""" ) else None ,
        ema_max_decay=args.ema_max_decay if hasattr(args , """ema_max_decay""" ) else None ,
        mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , """README.md""" )
    model_card.save(card_path )
def extract_commit_hash( resolved_file: Optional[str] , commit_hash: Optional[str] = None ):
    """simple docstring"""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r"""snapshots/([^/]+)/""" , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
old_diffusers_cache = os.path.join(hf_cache_home, """diffusers""")
def move_cache( old_cache_dir: Optional[str] = None , new_cache_dir: Optional[str] = None ):
    """simple docstring"""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    """Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
lowerCamelCase__ = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def _add_variant( weights_name: str , variant: Optional[str] = None ):
    """simple docstring"""
    if variant is not None:
        splits = weights_name.split(""".""" )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = """.""".join(splits )
    return weights_name
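# e.g. _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin"; with variant=None the name passes through unchanged.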
def _get_model_file( pretrained_model_name_or_path: str , *,
    weights_name: str , subfolder: Optional[str] , cache_dir: Optional[str] , force_download: bool , proxies: Optional[Dict] , resume_download: bool , local_files_only: bool , use_auth_token: Optional[str] , user_agent: Union[Dict, str, None] , revision: Optional[str] , commit_hash: Optional[str]=None , ):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse("""0.20.0""" )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , FutureWarning , )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}' so that the correct variant file can be added." , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
"""this model name. Check the model page at """
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config( model_name ):
    """simple docstring"""
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("""Model not supported""" )
    repo_id = """huggingface/label-files"""
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = """speech-commands-v2-id2label.json"""
    else:
        config.num_labels = 527
        filename = """audioset-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key( name ):
    """simple docstring"""
    if "module.v" in name:
        name = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
    if "cls_token" in name:
        name = name.replace("""cls_token""" , """embeddings.cls_token""" )
    if "dist_token" in name:
        name = name.replace("""dist_token""" , """embeddings.distillation_token""" )
    if "pos_embed" in name:
        name = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    # transformer blocks
    if "blocks" in name:
        name = name.replace("""blocks""" , """encoder.layer""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
    if "module.mlp_head.1" in name:
        name = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
    return name
def convert_state_dict( orig_state_dict , config ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def remove_keys( state_dict ):
    """simple docstring"""
    ignore_keys = [
        """module.v.head.weight""",
        """module.v.head.bias""",
        """module.v.head_dist.weight""",
        """module.v.head_dist.bias""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    """simple docstring"""
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if """speech-commands""" not in model_name else -6.845978
    std = 4.5689974 if """speech-commands""" not in model_name else 5.5654526
    max_length = 1024 if """speech-commands""" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
        waveform = dataset[0]["""audio"""]["""array"""]
    else:
        filepath = hf_hub_download(
            repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
        waveform, _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=16000 , return_tensors="""pt""" )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984] )
    else:
        raise ValueError("""Unknown model name""" )
    if not torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ):
        raise ValueError("""Logits don't match""" )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving feature extractor to {pytorch_dump_folder_path}" )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model and feature extractor to the hub...""" )
        model.push_to_hub(f"MIT/{model_name}" )
        feature_extractor.push_to_hub(f"MIT/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
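# Example invocation (the script filename is a placeholder):
#   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-finetuned-audioset \
#       --push_to_hub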
from timeit import timeit
test_data = {
    """MALAYALAM""": True,
    """String""": False,
    """rotor""": True,
    """level""": True,
    """A""": True,
    """BB""": True,
    """ABC""": False,
    """amanaplanacanalpanama""": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome( s: str ):
    """simple docstring"""
    start_i = 0
    end_i = len(s ) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal( s: str ):
    """simple docstring"""
    end = len(s ) // 2
    n = len(s )
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end ) )
def is_palindrome_recursive( s: str ):
    """simple docstring"""
    if len(s ) <= 2:
        return True
    if s[0] == s[len(s ) - 1]:
        return is_palindrome_recursive(s[1:-1] )
    else:
        return False
def is_palindrome_slice( s: str ):
    """simple docstring"""
    return s == s[::-1]
def benchmark_function( name: str ):
    """simple docstring"""
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt , setup=setup , number=number )
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =VOCAB_FILES_NAMES
__lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any =AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True,
                 keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>",
                 sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
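# Hedged usage sketch (requires downloading the "albert-base-v2" checkpoint; illustrative):
#   tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tokenizer.save_pretrained("./albert-tokenizer")  # writes spiece.model and tokenizer.json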
| 302
| 1
|
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (1.0 = identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
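# Quick worked checks of the formula above (an illustrative addition): identical
# strings score 1.0 (jaro = 1, so the prefix bonus vanishes) and fully disjoint
# strings score 0.0 (match_count = 0).
assert jaro_winkler("hello", "hello") == 1.0
assert jaro_winkler("abc", "xyz") == 0.0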
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 302
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
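    # Context for the setup above (illustrative note): IPNDM is a fourth-order
    # multistep method, which is why the checks seed scheduler.ets with four past
    # residuals before stepping.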
| 302
| 1
|
def perfect(number: int) -> bool:
    """Return True if number equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
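# Worked example (an illustrative addition): 28 = 1 + 2 + 4 + 7 + 14, so 28 is
# perfect, while 27 (proper divisors 1 + 3 + 9 = 13) is not.
assert perfect(28) and not perfect(27)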
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("Enter number: ").strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 302
|
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
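# Illustrative trace for the adjacency list above: a breadth first search from "G"
# records parents D<-B<-A<-C<-G, so shortest_path("D") returns "G->C->A->B->D",
# while shortest_path("Foo") raises ValueError because "Foo" is never reached.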
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 302
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""OwlViTFeatureExtractor"""]
lowerCamelCase__ = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0 - set clip_std to 1 so it won't
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37,
            num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3, size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000,
            clip_sample=True, clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 302
| 1
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve P = V * I for whichever of the three quantities is given as 0."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING.copy()
        mapping.update(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING)
        self.check_model_type(mapping)

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            raw_annotation["scores"] = raw_annotation["scores"].tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in raw_annotation["labels"]]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in raw_annotation["boxes"]]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
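# Hedged usage sketch (the checkpoint is a real public one, but any detector works;
# the image URL is illustrative):
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]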
| 302
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
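    # What this pattern buys (illustrative note): importing the package only records
    # the names in _import_structure; the heavy torch/vision submodules are loaded
    # on first attribute access via _LazyModule.__getattr__.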
| 302
| 1
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 302
|
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of data around pivot: (less, equal, greater)."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the index-th smallest element (0-based) in expected O(n) time."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
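# Illustrative sanity checks (quick_select picks pivots at random, but the result
# is deterministic): index 0 selects the minimum and index len-1 the maximum.
assert quick_select([5, 1, 4, 2, 3], 0) == 1
assert quick_select([5, 1, 4, 2, 3], 4) == 5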
| 302
| 1
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only,
            in_chans=config.num_channels, out_indices=out_indices, **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        # Empty init-weights function kept for compatibility with the base class.
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
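# Hedged usage sketch (backbone name illustrative; requires the `timm` extra):
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(2, 3))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # tuple of stage outputs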
| 302
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet
    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
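# Hedged usage sketch (laion/clap-htsat-unfused is a real CLAP checkpoint; the
# audio path is hypothetical):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])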
| 302
| 1
|
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan number sequence C(0) ... C(upper_limit)."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
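# Quick check of the recurrence above (an illustrative addition):
# C(0)..C(5) = 1, 1, 2, 5, 14, 42.
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]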
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 302
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None,
                 resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True,
                 do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
                 crop_size: Dict[str, int] = None, do_normalize: bool = True,
                 image_mean: Optional[Union[float, List[float]]] = None,
                 image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int],
               resample: PILImageResampling = PILImageResampling.BILINEAR,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int],
                    data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: float,
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None,
                   resample: PILImageResampling = None, do_center_crop: Optional[bool] = None,
                   crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None,
                   rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None,
                   image_mean: Optional[Union[float, List[float]]] = None,
                   image_std: Optional[Union[float, List[float]]] = None,
                   return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
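# Hedged usage sketch (the class name is kept as it appears in this dump; the
# checkpoint-free construction below is illustrative):
#   processor = SCREAMING_SNAKE_CASE()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224]) with the defaults above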
| 302
| 1
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(_SCREAMING_SNAKE_CASE , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
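# Illustrative reading of the first parametrization above: 10 shards over
# max_num_jobs=3 become the contiguous ranges 0-3, 4-6 and 7-9, i.e. job sizes
# stay balanced to within one shard.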
| 302
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
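# Minimal standalone sketch of the API under test (model id as used above):
#   model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#   model = model.to_bettertransformer()       # swap in fused attention kernels
#   model = model.reverse_bettertransformer()  # restore canonical weights before saving
#   model.save_pretrained("./t5-tiny")         # now succeeds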
| 302
| 1
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"""{solution() = }""")
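    # Why brute force is feasible here (illustrative note): XOR is self-inverse, so
    # plaintext[i] = ciphertext[i] ^ key[i % 3], and there are only 26**3 = 17,576
    # lowercase three-letter keys to try.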
| 302
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12,
                 num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1,
                 hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0,
                 bos_token_id=2, eos_token_id=3, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
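# Minimal usage sketch (illustrative values; the defaults above mirror albert-xxlarge):
#   config = AlbertConfig(vocab_size=30000, hidden_size=768, num_attention_heads=12)
#   config.model_type  # -> "albert"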
| 302
| 1
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    """Parse the command-line arguments for the TFRecord preparation script."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function( tokenizer ):
    """simple docstring"""
    def fn(examples ):
        return tokenizer(examples["""text"""] )
    return fn
def get_serialized_examples( tokenized_data ):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data["""input_ids"""] ) ):
        feature = {
            """input_ids""": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["""input_ids"""][i] ) ),
            """attention_mask""": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["""attention_mask"""][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        serialized = example.SerializeToString()
        records.append(serialized )
    return records
def main( args ):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f"Limiting the dataset to {args.limit} entries." )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["""text"""] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["""input_ids"""] )
        filename = os.path.join(split_dir , f"dataset-{shard_count}-{records_containing}.tfrecord" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("""Wrote file {} containing {} records""".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(f"split-{args.split}-records-count.txt" , """w""" ) as f:
        print(f"Total {args.split} records: {total_records}" , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
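# Example invocation (a sketch; the script filename is a placeholder, the
# flags match the argparse definitions above):
#   python prepare_tfrecord_shards.py --dataset_name wikitext \
#     --dataset_config wikitext-103-raw-v1 --split train \
#     --shard_size 1000 --max_length 512 --output_dir tf-tpu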
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
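# The try/except blocks above register names only when their backend
# (vision, torch, TF) is installed, and _LazyModule resolves them on first
# attribute access so that importing this package stays cheap. A minimal
# sketch of the same idea (illustrative, not the actual implementation):
#
# import importlib
# class LazyModule:
#     def __init__(self, name, structure):
#         self._name, self._structure = name, structure
#     def __getattr__(self, attr):
#         for submodule, names in self._structure.items():
#             if attr in names:
#                 mod = importlib.import_module(f"{self._name}.{submodule}")
#                 return getattr(mod, attr)
#         raise AttributeError(attr)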
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Dict =''
__lowerCamelCase : str =(
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
__lowerCamelCase : str =None # compression type in fsspec. ex: "gzip"
__lowerCamelCase : str =None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : Union[str, Any] , __lowercase : str = "" , __lowercase : Optional[str] = None , __lowercase : Optional[dict] = None , **__lowercase : List[str] ):
'''simple docstring'''
super().__init__(self , **__lowercase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__a = fsspec.open(
__lowercase , mode="""rb""" , protocol=__lowercase , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__a = os.path.basename(self.file.path.split("""::""" )[0] )
__a = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
__a = None
@classmethod
def UpperCamelCase_ ( cls : Tuple , __lowercase : Dict ):
'''simple docstring'''
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__lowercase ).lstrip("""/""" )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if self.dir_cache is None:
__a = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
__a = {f["""name"""]: f}
def UpperCamelCase_ ( self : int , __lowercase : str ):
'''simple docstring'''
return self.file.open().read()
def UpperCamelCase_ ( self : Dict , __lowercase : str , __lowercase : str = "rb" , __lowercase : Any=None , __lowercase : Optional[Any]=True , __lowercase : Any=None , **__lowercase : Any , ):
'''simple docstring'''
__a = self._strip_protocol(__lowercase )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : str ='bz2'
__lowerCamelCase : Any ='bz2'
__lowerCamelCase : List[str] ='.bz2'
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] ='gzip'
__lowerCamelCase : Optional[Any] ='gzip'
__lowerCamelCase : List[Any] ='.gz'
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[str] ='lz4'
__lowerCamelCase : Optional[Any] ='lz4'
__lowerCamelCase : Dict ='.lz4'
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : str ='xz'
__lowerCamelCase : Optional[Any] ='xz'
__lowerCamelCase : Dict ='.xz'
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] ='zstd'
__lowerCamelCase : str ='zstd'
__lowerCamelCase : List[Any] ='.zst'
def __init__( self : Dict , __lowercase : str , __lowercase : str = "rb" , __lowercase : Optional[str] = None , __lowercase : Optional[dict] = None , __lowercase : int = DEFAULT_BLOCK_SIZE , **__lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(
fo=__lowercase , mode=__lowercase , target_protocol=__lowercase , target_options=__lowercase , block_size=__lowercase , **__lowercase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__a = self.file.__enter__
class SCREAMING_SNAKE_CASE :
def __init__( self : str , __lowercase : str ):
'''simple docstring'''
__a = file_
def __enter__( self : Optional[int] ):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__( self : List[Any] , *__lowercase : Optional[int] , **__lowercase : Optional[Any] ):
'''simple docstring'''
self._file.__exit__(*__lowercase , **__lowercase )
def __iter__( self : str ):
'''simple docstring'''
return iter(self._file )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return next(self._file )
def __getattr__( self : Tuple , __lowercase : Any ):
'''simple docstring'''
return getattr(self._file , __lowercase )
def fixed_enter(*__lowercase : Optional[Any] , **__lowercase : Optional[int] ):
return WrappedFile(_enter(*__lowercase , **__lowercase ) )
__a = fixed_enter
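# A minimal usage sketch, mirroring the protocol comment near the top of the
# file: once one of these filesystems is registered with fsspec, a compressed
# file can be read through a chained URL (the paths here are placeholders).
# import fsspec
# with fsspec.open("gzip://file.txt::file:///tmp/file.txt.gz", "rb") as f:
#     data = f.read()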
class Node :
    def __init__( self , val ):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None
    def insert( self , val ):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder( root , res ):
    """simple docstring"""
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort( arr ):
    """simple docstring"""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
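# The demo above prints the sorted list: [1, 2, 3, 9, 10, 13, 14].
# Tree sort averages O(n log n); an already-sorted input degrades the BST
# into a linked list and the sort into O(n^2).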
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : UNetaDModel
__lowerCamelCase : ScoreSdeVeScheduler
def __init__( self : Optional[Any] , __lowercase : UNetaDModel , __lowercase : ScoreSdeVeScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase )
@torch.no_grad()
def __call__( self : Tuple , __lowercase : int = 1 , __lowercase : int = 2000 , __lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , **__lowercase : str , ):
'''simple docstring'''
__a = self.unet.config.sample_size
__a = (batch_size, 3, img_size, img_size)
__a = self.unet
__a = randn_tensor(__lowercase , generator=__lowercase ) * self.scheduler.init_noise_sigma
__a = sample.to(self.device )
self.scheduler.set_timesteps(__lowercase )
self.scheduler.set_sigmas(__lowercase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__a = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__a = self.unet(__lowercase , __lowercase ).sample
__a = self.scheduler.step_correct(__lowercase , __lowercase , generator=__lowercase ).prev_sample
# prediction step
__a = model(__lowercase , __lowercase ).sample
__a = self.scheduler.step_pred(__lowercase , __lowercase , __lowercase , generator=__lowercase )
__a , __a = output.prev_sample, output.prev_sample_mean
__a = sample_mean.clamp(0 , 1 )
__a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(__lowercase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__lowercase )
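# A hedged usage sketch (mirrors the upstream diffusers ScoreSdeVePipeline;
# the checkpoint id is an assumption):
# from diffusers import ScoreSdeVePipeline
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
# image = pipe(num_inference_steps=2000).images[0]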
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowercase , """width_multiplier""" ) )
class SCREAMING_SNAKE_CASE :
def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict=13 , __lowercase : int=64 , __lowercase : Tuple=2 , __lowercase : Tuple=3 , __lowercase : Tuple="swish" , __lowercase : List[Any]=3 , __lowercase : List[str]=32 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[int]=True , __lowercase : Dict=True , __lowercase : Tuple=10 , __lowercase : str=None , __lowercase : Optional[Any]=0.25 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple ):
'''simple docstring'''
__a = MobileViTVaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : Any , __lowercase : int , __lowercase : List[str] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[Any] =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCamelCase : Any =(
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Dict =False
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : int =False
__lowerCamelCase : Any =False
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__lowercase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(__lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[str] ):
__a = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(__lowercase ) , __lowercase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(__lowercase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__lowercase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__a = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = model.to(__lowercase )
__a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __lowercase )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=__lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = model.to(__lowercase )
__a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __lowercase )
__a = image_processor.post_process_semantic_segmentation(outputs=__lowercase )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __lowercase )
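# These integration checks follow the usual transformers test workflow: the
# @slow-decorated ones only run when RUN_SLOW=1 is set, e.g. (path assumed):
#   RUN_SLOW=1 python -m pytest tests/models/mobilevitv2 -k "integration"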
def infix_2_postfix( infix ):
    """simple docstring"""
    stack = []
    post_fix = []
    priority = {
        """^""": 3,
        """*""": 2,
        """/""": 2,
        """%""": 2,
        """+""": 1,
        """-""": 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        """Symbol""".center(8 ) , """Stack""".center(print_width ) , """Postfix""".center(print_width ) , sep=""" | """ , )
    print("""-""" * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # stop at "(" so operators inside parentheses are not popped early
                while len(stack ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ) , ("""""".join(stack )).ljust(print_width ) , ("""""".join(post_fix )).ljust(print_width ) , sep=""" | """ , )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
    print(
        """ """.center(8 ) , ("""""".join(stack )).ljust(print_width ) , ("""""".join(post_fix )).ljust(print_width ) , sep=""" | """ , )  # Output in tabular format
    return "".join(post_fix )  # return Postfix as str
def infix_2_prefix( infix ):
    """simple docstring"""
    infix = list(infix[::-1] )  # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = """)"""  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = """("""  # change ")" to "("
    return (infix_2_postfix("""""".join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("""\nEnter an Infix Equation = """)  # Input an Infix equation
    Infix = """""".join(Infix.split())  # Remove spaces from the input
print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : List[str]=13 , __lowercase : Dict=30 , __lowercase : Any=2 , __lowercase : Optional[Any]=3 , __lowercase : Dict=True , __lowercase : Optional[int]=True , __lowercase : Any=32 , __lowercase : Union[str, Any]=2 , __lowercase : int=4 , __lowercase : Any=37 , __lowercase : List[str]="gelu" , __lowercase : int=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : str=10 , __lowercase : Optional[Any]=0.02 , __lowercase : Union[str, Any]=3 , __lowercase : Any=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : List[Any] ):
'''simple docstring'''
__a = TFViTModel(config=__lowercase )
__a = model(__lowercase , training=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
__a = self.image_size // 2
__a = pixel_values[:, :, :image_size, :image_size]
__a = model(__lowercase , interpolate_pos_encoding=__lowercase , training=__lowercase )
__a = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : str , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : List[Any] ):
'''simple docstring'''
__a = self.type_sequence_label_size
__a = TFViTForImageClassification(__lowercase )
__a = model(__lowercase , labels=__lowercase , training=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
__a = self.image_size // 2
__a = pixel_values[:, :, :image_size, :image_size]
__a = model(__lowercase , interpolate_pos_encoding=__lowercase , training=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = TFViTForImageClassification(__lowercase )
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
__lowerCamelCase : Optional[int] =(
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
__lowerCamelCase : Tuple =False
__lowerCamelCase : Any =False
__lowerCamelCase : Any =False
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = TFViTModelTester(self )
__a = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , tf.keras.layers.Layer ) )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__lowercase )
__a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(__lowercase )
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""tf""" )
# forward pass
__a = model(**__lowercase )
# verify the logits
__a = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__a = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __lowercase , atol=1E-4 )
import string
import numpy
def greatest_common_divisor( a : int , b : int ):
    """simple docstring"""
    return b if a == 0 else greatest_common_divisor(b % a , a )
class HillCipher :
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )
    def __init__( self , encrypt_key : numpy.ndarray ):
        '''simple docstring'''
        self.encrypt_key = self.modulus(encrypt_key )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters( self , letter : str ):
        '''simple docstring'''
        return self.key_string.index(letter )
    def replace_digits( self , num : int ):
        '''simple docstring'''
        return self.key_string[round(num )]
    def check_determinant( self ):
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        req_l = len(self.key_string )
        if greatest_common_divisor(det , len(self.key_string ) ) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg )
    def process_text( self , text : str ):
        '''simple docstring'''
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars ) % self.break_key != 0:
            chars.append(last )
        return "".join(chars )
    def encrypt( self , text : str ):
        '''simple docstring'''
        text = self.process_text(text.upper() )
        encrypted = """"""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[
                0
            ]
            encrypted_batch = """""".join(
                self.replace_digits(num ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key( self ):
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        det_inv = None
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                det_inv = i
                break
        decrypt_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )
        return self.to_int(self.modulus(decrypt_key ) )
    def decrypt( self , text : str ):
        '''simple docstring'''
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper() )
        decrypted = """"""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec ) ).T.tolist()[0]
            decrypted_batch = """""".join(
                self.replace_digits(num ) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def main( ):
    """simple docstring"""
    n = int(input("""Enter the order of the encryption key: """ ) )
    hill_matrix = []
    print("""Enter each row of the encryption key with space separated integers""" )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = HillCipher(numpy.array(hill_matrix ) )
    print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
    option = input("""\n1. Encrypt\n2. Decrypt\n""" )
    if option == "1":
        text_e = input("""What text would you like to encrypt?: """ )
        print("""Your encrypted text is:""" )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input("""What text would you like to decrypt?: """ )
        print("""Your decrypted text is:""" )
        print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
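# Worked check of the determinant rule: for the key [[2, 5], [1, 6]] the
# determinant is 2*6 - 5*1 = 7 and gcd(7, 36) = 1, so check_determinant()
# accepts it; a key with determinant 6 is rejected since gcd(6, 36) = 6.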
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : str =['image_processor', 'tokenizer']
__lowerCamelCase : Union[str, Any] ='ChineseCLIPImageProcessor'
__lowerCamelCase : Optional[int] =('BertTokenizer', 'BertTokenizerFast')
def __init__( self : List[Any] , __lowercase : List[str]=None , __lowercase : Tuple=None , **__lowercase : Dict ):
'''simple docstring'''
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowercase , )
__a = kwargs.pop("""feature_extractor""" )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__lowercase , __lowercase )
__a = self.image_processor
def __call__( self : int , __lowercase : List[Any]=None , __lowercase : Optional[Any]=None , __lowercase : Union[str, Any]=None , **__lowercase : Union[str, Any] ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__a = self.tokenizer(__lowercase , return_tensors=__lowercase , **__lowercase )
if images is not None:
__a = self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase )
if text is not None and images is not None:
__a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowercase ) , tensor_type=__lowercase )
def UpperCamelCase_ ( self : Dict , *__lowercase : Optional[Any] , **__lowercase : Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Dict , *__lowercase : str , **__lowercase : Any ):
'''simple docstring'''
return self.tokenizer.decode(*__lowercase , **__lowercase )
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = self.tokenizer.model_input_names
__a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __lowercase , )
return self.image_processor_class
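# A hedged usage sketch (mirrors the upstream ChineseCLIPProcessor API; the
# checkpoint id is an assumption):
# from transformers import ChineseCLIPProcessor
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")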
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] ='autoformer'
__lowerCamelCase : str ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : List[Any] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase : bool = True , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 100 , __lowercase : float = 0.02 , __lowercase : bool = True , __lowercase : List[Any]=True , __lowercase : int = 10 , __lowercase : int = 25 , __lowercase : int = 3 , **__lowercase : Optional[int] , ):
'''simple docstring'''
# time series specific configuration
__a = prediction_length
__a = context_length if context_length is not None else prediction_length
__a = distribution_output
__a = loss
__a = input_size
__a = num_time_features
__a = lags_sequence
__a = scaling
__a = num_dynamic_real_features
__a = num_static_real_features
__a = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
__a = cardinality
else:
__a = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
__a = embedding_dimension
else:
__a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__a = num_parallel_samples
# Transformer architecture configuration
__a = input_size * len(self.lags_sequence ) + self._number_of_features
__a = d_model
__a = encoder_attention_heads
__a = decoder_attention_heads
__a = encoder_ffn_dim
__a = decoder_ffn_dim
__a = encoder_layers
__a = decoder_layers
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = activation_function
__a = init_std
__a = use_cache
# Autoformer
__a = label_length
__a = moving_average
__a = autocorrelation_factor
super().__init__(is_encoder_decoder=__lowercase , **__lowercase )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
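# Example of the feature arithmetic above (values illustrative): with
# embedding_dimension=[32], num_dynamic_real_features=0, num_time_features=7,
# num_static_real_features=0 and input_size=1, _number_of_features is
# 32 + 0 + 7 + 0 + 1 * 2 = 41; with the default lags_sequence of length 7,
# the encoder input then carries 1 * 7 + 41 = 48 feature channels.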
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from statistics import mean
import numpy as np
def calculate_turn_around_time( process_name : list , arrival_time : list , burst_time : list , no_of_process : int ):
    """simple docstring"""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks each process: 0 = not finished yet, 1 = finished.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time )]
    process_name = [process_name[i] for i in np.argsort(arrival_time )]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0 , no_of_process ):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time( process_name : list , turn_around_time : list , burst_time : list , no_of_process : int ):
    """simple docstring"""
    waiting_time = [0] * no_of_process
    for i in range(0 , no_of_process ):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
lowerCamelCase__ = 5
lowerCamelCase__ = ["""A""", """B""", """C""", """D""", """E"""]
lowerCamelCase__ = [1, 2, 3, 4, 5]
lowerCamelCase__ = [1, 2, 3, 4, 5]
lowerCamelCase__ = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCamelCase__ = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 302
|
from __future__ import annotations
lowerCamelCase__ = """#"""
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ):
'''simple docstring'''
__a = {}
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in text:
if char not in trie:
__a = {}
__a = trie[char]
__a = True
def UpperCamelCase_ ( self : Tuple , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in prefix:
if char in trie:
__a = trie[char]
else:
return []
return self._elements(__lowercase )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : dict ):
'''simple docstring'''
__a = []
for c, v in d.items():
__a = [""" """] if c == END else [(c + s) for s in self._elements(__lowercase )]
result.extend(__lowercase )
return tuple(__lowercase )
lowerCamelCase__ = Trie()
lowerCamelCase__ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = trie.find_word(_SCREAMING_SNAKE_CASE )
return tuple(string + word for word in suffixes )
def lowerCAmelCase__ ( ):
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
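# Expected behaviour of the demo above (a hedged note, not part of the source):
# find_word("de") collects the suffix of every stored word starting with "de",
# each terminated by the " " end-of-word sentinel, so main() should print
# something like ('depart ', 'detergent ', 'deer ', 'deal ').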
| 302
| 1
|
lowerCamelCase__ = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
lowerCamelCase__ = frozenset(["""prompt""", """negative_prompt"""])
lowerCamelCase__ = frozenset([])
lowerCamelCase__ = frozenset(["""image"""])
lowerCamelCase__ = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCamelCase__ = frozenset(["""image"""])
lowerCamelCase__ = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
lowerCamelCase__ = frozenset(["""prompt""", """image""", """negative_prompt"""])
lowerCamelCase__ = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
lowerCamelCase__ = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
lowerCamelCase__ = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCamelCase__ = frozenset(["""image""", """mask_image"""])
lowerCamelCase__ = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCamelCase__ = frozenset(["""example_image""", """image""", """mask_image"""])
lowerCamelCase__ = frozenset(["""class_labels"""])
lowerCamelCase__ = frozenset(["""class_labels"""])
lowerCamelCase__ = frozenset(["""batch_size"""])
lowerCamelCase__ = frozenset([])
lowerCamelCase__ = frozenset(["""batch_size"""])
lowerCamelCase__ = frozenset([])
lowerCamelCase__ = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
lowerCamelCase__ = frozenset(["""prompt""", """negative_prompt"""])
lowerCamelCase__ = frozenset(["""input_tokens"""])
lowerCamelCase__ = frozenset(["""input_tokens"""])
| 302
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : torch.FloatTensor
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ):
@register_to_config
def __init__( self : Dict , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Any=77 , __lowercase : Optional[int]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
__a = num_attention_heads
__a = attention_head_dim
__a = num_attention_heads * attention_head_dim
__a = additional_embeddings
__a = time_embed_dim or inner_dim
__a = embedding_proj_dim or embedding_dim
__a = clip_embed_dim or embedding_dim
__a = Timesteps(__lowercase , __lowercase , 0 )
__a = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase )
__a = nn.Linear(__lowercase , __lowercase )
if embedding_proj_norm_type is None:
__a = None
elif embedding_proj_norm_type == "layer":
__a = nn.LayerNorm(__lowercase )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
__a = nn.Linear(__lowercase , __lowercase )
if encoder_hid_proj_type is None:
__a = None
elif encoder_hid_proj_type == "linear":
__a = nn.Linear(__lowercase , __lowercase )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
__a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) )
if added_emb_type == "prd":
__a = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) )
elif added_emb_type is None:
__a = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
__a = nn.ModuleList(
[
BasicTransformerBlock(
__lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn="""gelu""" , attention_bias=__lowercase , )
for d in range(__lowercase )
] )
if norm_in_type == "layer":
__a = nn.LayerNorm(__lowercase )
elif norm_in_type is None:
__a = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
__a = nn.LayerNorm(__lowercase )
__a = nn.Linear(__lowercase , __lowercase )
__a = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
__a = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , __lowercase , persistent=__lowercase )
__a = nn.Parameter(torch.zeros(1 , __lowercase ) )
__a = nn.Parameter(torch.zeros(1 , __lowercase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = {}
def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ):
if hasattr(__lowercase , """set_processor""" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__lowercase , __lowercase , __lowercase )
return processors
def UpperCamelCase_ ( self : List[str] , __lowercase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
__a = len(self.attn_processors.keys() )
if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict ):
if hasattr(__lowercase , """set_processor""" ):
if not isinstance(__lowercase , __lowercase ):
module.set_processor(__lowercase )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , __lowercase , __lowercase )
for name, module in self.named_children():
fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.FloatTensor , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.BoolTensor] = None , __lowercase : bool = True , ):
'''simple docstring'''
__a = hidden_states.shape[0]
__a = timestep
if not torch.is_tensor(__lowercase ):
__a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0:
__a = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__a = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device )
__a = self.time_proj(__lowercase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__a = timesteps_projected.to(dtype=self.dtype )
__a = self.time_embedding(__lowercase )
if self.embedding_proj_norm is not None:
__a = self.embedding_proj_norm(__lowercase )
__a = self.embedding_proj(__lowercase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__a = self.encoder_hidden_states_proj(__lowercase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
__a = self.proj_in(__lowercase )
__a = self.positional_embedding.to(hidden_states.dtype )
__a = []
__a = 0
if encoder_hidden_states is not None:
additional_embeds.append(__lowercase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__a = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__a = hidden_states[:, None, :]
__a = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__a = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 )
additional_embeds.append(__lowercase )
__a = torch.cat(
__lowercase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__a = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__a = F.pad(
__lowercase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__a = hidden_states + positional_embeddings
if attention_mask is not None:
__a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
__a = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 )
__a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__a = self.norm_in(__lowercase )
for block in self.transformer_blocks:
__a = block(__lowercase , attention_mask=__lowercase )
__a = self.norm_out(__lowercase )
if self.prd_embedding is not None:
__a = hidden_states[:, -1]
else:
__a = hidden_states[:, additional_embeddings_len:]
__a = self.proj_to_clip_embeddings(__lowercase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : Tuple ):
'''simple docstring'''
__a = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
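# Two implementation details above are worth noting: causal_attention_mask is a
# square of -10000.0 kept strictly above the diagonal (triu_(1)), a soft -inf
# that restricts each token to attending backwards; and the final method
# un-normalizes predicted latents back into CLIP embedding space via
# latents * clip_std + clip_mean.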
| 302
| 1
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowercase , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__lowercase , """depth_multiplier""" ) )
class SCREAMING_SNAKE_CASE :
def __init__( self : Any , __lowercase : Optional[Any] , __lowercase : Any=13 , __lowercase : Any=3 , __lowercase : Union[str, Any]=32 , __lowercase : Dict=0.25 , __lowercase : Optional[Any]=8 , __lowercase : Any=8 , __lowercase : Optional[Any]=6 , __lowercase : Union[str, Any]=32 , __lowercase : int=True , __lowercase : int=True , __lowercase : Optional[Any]=True , __lowercase : Union[str, Any]="relu6" , __lowercase : Optional[Any]=1280 , __lowercase : Union[str, Any]=0.1 , __lowercase : str=0.02 , __lowercase : str=True , __lowercase : List[str]=True , __lowercase : Any=10 , __lowercase : Dict=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = depth_multiplier
__a = depth_divisible_by
__a = min_depth
__a = expand_ratio
__a = tf_padding
__a = output_stride
__a = first_layer_is_expansion
__a = finegrained_output
__a = hidden_act
__a = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Tuple , __lowercase : int , __lowercase : Any , __lowercase : Optional[int] ):
'''simple docstring'''
__a = MobileNetVaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def UpperCamelCase_ ( self : Any , __lowercase : str , __lowercase : int , __lowercase : int , __lowercase : Dict ):
'''simple docstring'''
__a = self.num_labels
__a = MobileNetVaForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[int] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileNetVaForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[str] =(
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCamelCase : Union[str, Any] =(
{
'feature-extraction': MobileNetVaModel,
'image-classification': MobileNetVaForImageClassification,
'image-segmentation': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Dict =False
__lowerCamelCase : Any =False
__lowerCamelCase : Any =False
__lowerCamelCase : List[str] =False
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileNetVaModelTester(self )
__a = MobileNetVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__lowercase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def check_hidden_states_output(__lowercase : Dict , __lowercase : Optional[int] , __lowercase : List[Any] ):
__a = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__a = outputs.hidden_states
__a = 16
self.assertEqual(len(__lowercase ) , __lowercase )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileNetVaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__lowercase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
# verify the logits
__a = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __lowercase )
__a = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__a = model.to(__lowercase )
__a = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __lowercase )
__a = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) )
| 302
|
from functools import lru_cache
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = 2
__a = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_SCREAMING_SNAKE_CASE )
if n > 1:
factors.add(_SCREAMING_SNAKE_CASE )
return factors
@lru_cache
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return len(unique_prime_factors(_SCREAMING_SNAKE_CASE ) )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list ):
"""simple docstring"""
return len(set(_SCREAMING_SNAKE_CASE ) ) in (0, 1)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = 2
while True:
        # Build the next window of consecutive integers starting at base
__a = [base + i for i in range(_SCREAMING_SNAKE_CASE )]
        # Run each element through our unique_prime_factors function.
# Append our target number to the end.
__a = [upf_len(_SCREAMING_SNAKE_CASE ) for x in group]
checker.append(_SCREAMING_SNAKE_CASE )
# If all numbers in the list are equal, return the group variable.
if equality(_SCREAMING_SNAKE_CASE ):
return group
# Increment our base variable by 1
base += 1
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 4 ):
"""simple docstring"""
__a = run(_SCREAMING_SNAKE_CASE )
return results[0] if len(_SCREAMING_SNAKE_CASE ) else None
if __name__ == "__main__":
print(solution())
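# Sanity check (a self-contained sketch, independent of the mangled names
# above): for a window of size 2 the expected answer is 14, since 14 = 2 * 7
# and 15 = 3 * 5 each have exactly two distinct prime factors.
def distinct_prime_factor_count(n: int) -> int:
    count, p = 0, 2
    while p * p <= n:
        if n % p == 0:
            count += 1
            while n % p == 0:
                n //= p
        p += 1
    return count + (1 if n > 1 else 0)


assert distinct_prime_factor_count(14) == 2
assert distinct_prime_factor_count(15) == 2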
| 302
| 1
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = "cpu" , _SCREAMING_SNAKE_CASE : Union[str, None] = None ):
"""simple docstring"""
__a = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
__a = v.half()
if save_path is None: # overwrite src_path
__a = src_path
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
fire.Fire(convert)
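# Example invocation (the script filename and paths are hypothetical; the
# arguments follow the convert(src_path, map_location="cpu", save_path=None)
# signature exposed through fire.Fire above):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin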
| 302
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
__a = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__a = 128
elif "12-12" in model_name:
__a = 12
__a = 12
elif "14-14" in model_name:
__a = 14
__a = 14
elif "16-16" in model_name:
__a = 16
__a = 16
else:
raise ValueError("""Model not supported""" )
__a = """huggingface/label-files"""
if "speech-commands" in model_name:
__a = 35
__a = """speech-commands-v2-id2label.json"""
else:
__a = 527
__a = """audioset-id2label.json"""
__a = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
__a = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if "module.v" in name:
__a = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
__a = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
__a = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
__a = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__a = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
__a = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__a = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__a = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__a = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__a = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__a = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__a = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__a = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
__a = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
__a = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__a = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "qkv" in key:
__a = key.split(""".""" )
__a = int(key_split[3] )
__a = config.hidden_size
if "weight" in key:
__a = val[:dim, :]
__a = val[dim : dim * 2, :]
__a = val[-dim:, :]
else:
__a = val[:dim]
__a = val[dim : dim * 2]
__a = val[-dim:]
else:
__a = val
return orig_state_dict
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
__a = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@torch.no_grad()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=False ):
"""simple docstring"""
__a = get_audio_spectrogram_transformer_config(_SCREAMING_SNAKE_CASE )
__a = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
__a = model_name_to_url[model_name]
__a = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
# remove some keys
remove_keys(_SCREAMING_SNAKE_CASE )
# rename some keys
__a = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load 🤗 model
__a = ASTForAudioClassification(_SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
__a = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978
__a = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526
__a = 1024 if """speech-commands""" not in model_name else 128
__a = ASTFeatureExtractor(mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
if "speech-commands" in model_name:
__a = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
__a = dataset[0]["""audio"""]["""array"""]
else:
__a = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
__a , __a = torchaudio.load(_SCREAMING_SNAKE_CASE )
__a = waveform.squeeze().numpy()
__a = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=1_6000 , return_tensors="""pt""" )
# forward pass
__a = model(**_SCREAMING_SNAKE_CASE )
__a = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__a = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__a = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__a = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__a = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__a = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__a = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__a = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__a = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"Saving feature extractor to {pytorch_dump_folder_path}" )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f"MIT/{model_name}" )
feature_extractor.push_to_hub(f"MIT/{model_name}" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase__ = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
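# Example invocation (the script filename is hypothetical; the flags match the
# argparse definition above):
#   python convert_ast_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub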
| 302
| 1
|
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 1000 ):
"""simple docstring"""
return sum(e for e in range(3 , _SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 302
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =VOCAB_FILES_NAMES
__lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any =AlbertTokenizer
def __init__( self : Tuple , __lowercase : Union[str, Any]=None , __lowercase : Optional[int]=None , __lowercase : int=True , __lowercase : Dict=True , __lowercase : str=False , __lowercase : str="[CLS]" , __lowercase : List[Any]="[SEP]" , __lowercase : Any="<unk>" , __lowercase : List[Any]="[SEP]" , __lowercase : List[Any]="<pad>" , __lowercase : Optional[Any]="[CLS]" , __lowercase : List[str]="[MASK]" , **__lowercase : str , ):
'''simple docstring'''
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text, so there should be a match in a non-normalized sentence.
__a = (
AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase )
if isinstance(__lowercase , __lowercase )
else mask_token
)
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = False if not self.vocab_file else True
def UpperCamelCase_ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : str , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Tuple , __lowercase : str , __lowercase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__a = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,)
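# Shape reminder, paraphrasing the two methods above: ALBERT wraps a single
# sequence as [CLS] tokens [SEP] and a pair as [CLS] tokens_a [SEP] tokens_b [SEP];
# token_type_ids are 0 up to and including the first [SEP] and 1 afterwards.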
| 302
| 1
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Tuple , __lowercase : int = 101 ):
'''simple docstring'''
__a = length
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.length
def __getitem__( self : List[str] , __lowercase : Tuple ):
'''simple docstring'''
return i
class SCREAMING_SNAKE_CASE :
def __call__( self : Optional[int] , __lowercase : List[str] ):
'''simple docstring'''
return {"input_ids": torch.tensor(__lowercase ), "labels": torch.tensor(__lowercase )}
class SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Optional[int] ):
'''simple docstring'''
super().__init__()
# Add some (unused) params otherwise DDP will complain.
__a = nn.Linear(120 , 80 )
def UpperCamelCase_ ( self : Dict , __lowercase : Optional[Any] , __lowercase : Optional[int]=None ):
'''simple docstring'''
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
@require_torch_neuroncore
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = F"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
__a = self.get_auto_remove_tmp_dir()
__a = F"--output_dir {output_dir}".split()
__a = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(__lowercase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
@require_torch_multi_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = F"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
__a = self.get_auto_remove_tmp_dir()
__a = F"--output_dir {output_dir}".split()
__a = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(__lowercase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
lowerCamelCase__ = HfArgumentParser((TrainingArguments,))
lowerCamelCase__ = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
lowerCamelCase__ = DummyDataset(dataset_length)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : EvalPrediction ):
"""simple docstring"""
__a = list(range(len(_SCREAMING_SNAKE_CASE ) ) )
__a = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"""Predictions and/or labels do not match expected results:\n - predictions: """
f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" )
return {"success": success}
lowerCamelCase__ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowerCamelCase__ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCamelCase__ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCamelCase__ = 2
lowerCamelCase__ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCamelCase__ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCamelCase__ = None
| 302
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Optional[int] =(IPNDMScheduler,)
__lowerCamelCase : int =(('num_inference_steps', 50),)
def UpperCamelCase_ ( self : str , **__lowercase : Dict ):
'''simple docstring'''
__a = {"""num_train_timesteps""": 1000}
config.update(**__lowercase )
return config
def UpperCamelCase_ ( self : Any , __lowercase : Tuple=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
new_scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : str , __lowercase : int=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals (must be after setting timesteps)
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowercase )
# copy over dummy past residual (must be after setting timesteps)
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : List[str] , **__lowercase : Dict ):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
__a = 10
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(__lowercase )
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
return sample
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowercase , """set_timesteps""" ):
scheduler.set_timesteps(__lowercase )
elif num_inference_steps is not None and not hasattr(__lowercase , """set_timesteps""" ):
__a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__a = dummy_past_residuals[:]
__a = scheduler.timesteps[5]
__a = scheduler.timesteps[6]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__lowercase , time_step=__lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__lowercase , time_step=__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.full_loop()
__a = torch.mean(torch.abs(__lowercase ) )
assert abs(result_mean.item() - 2540529 ) < 10
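# Note on the final assertion above: it pins the mean absolute value of the
# full denoising loop to ~2,540,529 with a tolerance of 10, i.e. it fixes the
# scheduler's deterministic trajectory on the dummy model.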
| 302
| 1
|
import math
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = 0
__a = 0
while num > 0:
__a = num % 8
__a = octal + (remainder * math.floor(math.pow(10 , _SCREAMING_SNAKE_CASE ) ))
counter += 1
        __a = math.floor(num / 8 )  # integer division by 8, dropping any remainder
# This formatting removes trailing '.0' from `octal`.
return f"0o{int(_SCREAMING_SNAKE_CASE )}"
def lowerCAmelCase__ ( ):
"""simple docstring"""
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(216 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(512 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
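# A more idiomatic sketch of the same conversion using divmod (assumption: the
# input is a non-negative int, matching the demo values above; the function
# name is hypothetical):
def decimal_to_octal_divmod(num: int) -> str:
    digits = ""
    while num > 0:
        num, remainder = divmod(num, 8)
        digits = str(remainder) + digits
    return "0o" + (digits or "0")


assert decimal_to_octal_divmod(65) == "0o101"
assert decimal_to_octal_divmod(0) == "0o0"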
| 302
|
from __future__ import annotations
lowerCamelCase__ = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , __lowercase : dict[str, list[str]] , __lowercase : str ):
'''simple docstring'''
__a = graph
# mapping node to its parent in resulting breadth first tree
__a = {}
__a = source_vertex
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = {self.source_vertex}
__a = None
__a = [self.source_vertex] # first in first out queue
while queue:
__a = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(__lowercase )
__a = vertex
queue.append(__lowercase )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : str ):
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
__a = self.parent.get(__lowercase )
if target_vertex_parent is None:
__a = (
F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(__lowercase )
return self.shortest_path(__lowercase ) + F"->{target_vertex}"
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, """G""")
    g.breadth_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 302
| 1
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : str , __lowercase : Any ):
'''simple docstring'''
self.assertEqual(len(__lowercase ) , len(__lowercase ) )
for a, b in zip(__lowercase , __lowercase ):
self.assertAlmostEqual(__lowercase , __lowercase , delta=__lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__lowercase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices("""CPU""" )
if len(__lowercase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type="""CPU""" )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=__lowercase )
def accumulate_on_replica(__lowercase : str ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__lowercase : List[str] , __lowercase : Optional[Any] ):
with strategy.scope():
__a = strategy.experimental_local_results(__lowercase )
local_variables[0].assign(__lowercase )
local_variables[1].assign(__lowercase )
strategy.run(__lowercase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__lowercase )
def _check_local_values(__lowercase : Union[str, Any] , __lowercase : int ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , __lowercase , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , __lowercase , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
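# Minimal pure-Python stand-in for what the first test above exercises (a
# sketch, not the TF implementation): per-variable gradient sums accumulated
# across steps, a length check on later calls, and a reset back to zeros.
class TinyAccumulator:
    def __init__(self) -> None:
        self.step = 0
        self.gradients = None

    def __call__(self, grads):
        if self.gradients is None:
            self.gradients = [list(g) for g in grads]
        elif len(grads) != len(self.gradients):
            raise ValueError("expected as many gradients as on the first call")
        else:
            for acc, grad in zip(self.gradients, grads):
                for i, value in enumerate(grad):
                    acc[i] += value
        self.step += 1

    def reset(self) -> None:
        self.step = 0
        for acc in self.gradients or []:
            for i in range(len(acc)):
                acc[i] = 0.0


acc = TinyAccumulator()
acc([[1.0, 2.0]])
acc([[-2.0, 1.0]])
acc([[-1.0, 2.0]])
assert acc.step == 3 and acc.gradients[0] == [-2.0, 5.0]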
| 302
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple =KandinskyVaaPriorPipeline
__lowerCamelCase : Union[str, Any] =['prompt']
__lowerCamelCase : Any =['prompt', 'negative_prompt']
__lowerCamelCase : List[str] =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase : List[Any] =False
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return 100
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowercase )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
__a = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
__a = PriorTransformer(**__lowercase )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
__a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__a = CLIPVisionModelWithProjection(__lowercase )
return model
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.dummy_prior
__a = self.dummy_image_encoder
__a = self.dummy_text_encoder
__a = self.dummy_tokenizer
__a = self.dummy_image_processor
__a = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowercase , clip_sample_range=10.0 , )
__a = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Any=0 ):
'''simple docstring'''
if str(__lowercase ).startswith("""mps""" ):
__a = torch.manual_seed(__lowercase )
else:
__a = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__a = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__lowercase )
__a = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__a = pipe(**self.get_dummy_inputs(__lowercase ) )
__a = output.image_embeds
__a = pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
__a = image[0, -10:]
__a = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__a = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = torch_device == """cpu"""
__a = True
__a = False
self._test_inference_batch_single_identical(
test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
@skip_mps
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = torch_device == """cpu"""
__a = False
self._test_attention_slicing_forward_pass(
test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
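# A hedged end-to-end sketch of invoking the prior pipeline under test; the hub
# checkpoint id is an assumption for illustration (upstream the class is named
# KandinskyV22PriorPipeline).
import torch
from diffusers import KandinskyVaaPriorPipeline

pipe = KandinskyVaaPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
out = pipe("horse", num_inference_steps=2, generator=torch.manual_seed(0), output_type="np")
image_embeds = out.image_embeds  # CLIP image embeddings consumed by the decoder pipeline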
| 302
| 1
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : str , __lowercase : str , __lowercase : Dict=13 , __lowercase : Dict=7 , __lowercase : str=True , __lowercase : List[Any]=True , __lowercase : List[str]=True , __lowercase : List[str]=True , __lowercase : Any=99 , __lowercase : Dict=32 , __lowercase : Dict=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Union[str, Any]="gelu" , __lowercase : Dict=0.1 , __lowercase : Dict=0.1 , __lowercase : Optional[Any]=512 , __lowercase : Optional[int]=16 , __lowercase : str=2 , __lowercase : Optional[Any]=0.02 , __lowercase : Tuple=4 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_attention_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_choices
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_attention_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[Any] =True
__lowerCamelCase : Optional[int] =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__a = model_class_name.from_pretrained("""roberta-base""" , from_pt=__lowercase )
__a = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowercase )
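# A short smoke-test sketch mirroring the slow test above, assuming the public
# `roberta-base` checkpoint is available and flax is installed.
import numpy as np
from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base")
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 1, 768) for roberta-base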
| 302
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = Dict[str, Any]
lowerCamelCase__ = List[Prediction]
@add_end_docstrings(lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ):
'''simple docstring'''
super().__init__(*__lowercase , **__lowercase )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def UpperCamelCase_ ( self : Optional[int] , **__lowercase : List[str] ):
'''simple docstring'''
__a = {}
if "threshold" in kwargs:
__a = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self : List[Any] , *__lowercase : Any , **__lowercase : Tuple ):
'''simple docstring'''
return super().__call__(*__lowercase , **__lowercase )
def UpperCamelCase_ ( self : str , __lowercase : Tuple ):
'''simple docstring'''
__a = load_image(__lowercase )
__a = torch.IntTensor([[image.height, image.width]] )
__a = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
__a = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
__a = target_size
return inputs
def UpperCamelCase_ ( self : Dict , __lowercase : List[str] ):
'''simple docstring'''
__a = model_inputs.pop("""target_size""" )
__a = self.model(**__lowercase )
__a = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
__a = model_inputs["""bbox"""]
return model_outputs
def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any]=0.9 ):
'''simple docstring'''
__a = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__a , __a = target_size[0].tolist()
def unnormalize(__lowercase : Optional[Any] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
__a , __a = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__a = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__a = [unnormalize(__lowercase ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
__a = ["""score""", """label""", """box"""]
__a = [dict(zip(__lowercase , __lowercase ) ) for vals in zip(scores.tolist() , __lowercase , __lowercase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__a = self.image_processor.post_process_object_detection(__lowercase , __lowercase , __lowercase )
__a = raw_annotations[0]
__a = raw_annotation["""scores"""]
__a = raw_annotation["""labels"""]
__a = raw_annotation["""boxes"""]
__a = scores.tolist()
__a = [self.model.config.idalabel[label.item()] for label in labels]
__a = [self._get_bounding_box(__lowercase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__a = ["""score""", """label""", """box"""]
__a = [
dict(zip(__lowercase , __lowercase ) )
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
]
return annotation
def UpperCamelCase_ ( self : Optional[int] , __lowercase : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
__a , __a , __a , __a = box.int().tolist()
__a = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
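# A hedged usage sketch for this pipeline via the high-level factory; the DETR
# checkpoint and image URL are assumptions for illustration.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# Each prediction looks like:
# {"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}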
| 302
| 1
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
__a = json.loads(_SCREAMING_SNAKE_CASE )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
__a = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
__a = json.loads(_SCREAMING_SNAKE_CASE )
if not mpi_options.get("""sagemaker_mpi_enabled""" , _SCREAMING_SNAKE_CASE ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : str =field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , __lowercase , )
@cached_property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
__a = torch.device("""cpu""" )
__a = 0
elif is_sagemaker_model_parallel_available():
__a = smp.local_rank()
__a = torch.device("""cuda""" , __lowercase )
__a = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
__a = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
__a = torch.device("""cuda""" , self.local_rank )
__a = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__a = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__a = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
__a = torch.device("""cuda""" , self.local_rank )
__a = 1
if device.type == "cuda":
torch.cuda.set_device(__lowercase )
return device
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return False
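# A self-contained sketch of the environment probe performed above by
# `is_sagemaker_model_parallel_available`; the JSON payloads mimic what
# SageMaker injects (values are illustrative).
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
mpi_options = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
print("partitions" in smp_options and mpi_options.get("sagemaker_mpi_enabled", False))  # True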
| 302
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
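# The module above only materializes its submodules on first attribute access;
# below is a minimal standard-library sketch of the same lazy-import idea (a
# simplified stand-in for the `_LazyModule` helper, not its actual code).
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported symbol -> submodule that defines it
        self._symbol_to_module = {s: m for m, symbols in import_structure.items() for s in symbols}

    def __getattr__(self, symbol):
        # import the owning submodule lazily, then forward the lookup
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(submodule, symbol)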
| 302
| 1
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Optional[int] =(EulerDiscreteScheduler,)
__lowerCamelCase : str =10
def UpperCamelCase_ ( self : Optional[int] , **__lowercase : List[str] ):
'''simple docstring'''
__a = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__lowercase )
return config
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__lowercase , beta_end=__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(self.num_inference_steps )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma
__a = sample.to(__lowercase )
for i, t in enumerate(scheduler.timesteps ):
__a = scheduler.scale_model_input(__lowercase , __lowercase )
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase )
__a = output.prev_sample
__a = torch.sum(torch.abs(__lowercase ) )
__a = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type="""v_prediction""" )
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(self.num_inference_steps )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma
__a = sample.to(__lowercase )
for i, t in enumerate(scheduler.timesteps ):
__a = scheduler.scale_model_input(__lowercase , __lowercase )
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase )
__a = output.prev_sample
__a = torch.sum(torch.abs(__lowercase ) )
__a = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=__lowercase )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__a = sample.to(__lowercase )
for t in scheduler.timesteps:
__a = scheduler.scale_model_input(__lowercase , __lowercase )
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase )
__a = output.prev_sample
__a = torch.sum(torch.abs(__lowercase ) )
__a = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase , use_karras_sigmas=__lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=__lowercase )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__a = sample.to(__lowercase )
for t in scheduler.timesteps:
__a = scheduler.scale_model_input(__lowercase , __lowercase )
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase )
__a = output.prev_sample
__a = torch.sum(torch.abs(__lowercase ) )
__a = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1E-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1E-3
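# The sampling loop these tests repeat, written out once as a hedged sketch;
# the scaled dummy prediction stands in for a real UNet call (an assumption).
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)  # sigma-dependent input scaling
    noise_pred = 0.1 * model_input  # stand-in for unet(model_input, t).sample
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample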
| 302
|
import random
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
__a , __a , __a = [], [], []
for element in data:
if element < pivot:
less.append(_SCREAMING_SNAKE_CASE )
elif element > pivot:
greater.append(_SCREAMING_SNAKE_CASE )
else:
equal.append(_SCREAMING_SNAKE_CASE )
return less, equal, greater
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0:
return None
__a = items[random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )]
__a = 0
__a , __a , __a = _partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a = len(_SCREAMING_SNAKE_CASE )
__a = len(_SCREAMING_SNAKE_CASE )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# must be in larger
else:
return quick_select(_SCREAMING_SNAKE_CASE , index - (m + count) )
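# Example usage, assuming the two helpers keep their upstream names `_partition`
# and `quick_select` (the recursive calls above still reference `quick_select`):
# quick_select returns the index-th smallest element (0-based), e.g.
#
#     data = [2, 4, 5, 7, 899, 54, 32]
#     for k in range(len(data)):
#         assert quick_select(data, k) == sorted(data)[k]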
| 302
| 1
|
from abc import ABC, abstractmethod
from typing import List, Optional
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : List[Any] ):
'''simple docstring'''
        # sanity-check on construction that the subclass fulfils the constraint protocol
self.test()
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = 0
__a = False
while not completed:
if counter == 1:
self.reset()
__a = self.advance()
if not self.does_advance(__lowercase ):
raise Exception(
"""Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
__a , __a , __a = self.update(__lowercase )
counter += 1
if counter > 10000:
raise Exception("""update() does not fulfill the constraint.""" )
if self.remaining() != 0:
raise Exception("""Custom Constraint is not defined correctly.""" )
@abstractmethod
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self : Any , __lowercase : int ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self : Tuple , __lowercase : int ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self : Optional[int] , __lowercase : Any=False ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Optional[Any] , __lowercase : List[int] ):
'''simple docstring'''
super(__lowercase , self ).__init__()
if not isinstance(__lowercase , __lowercase ) or len(__lowercase ) == 0:
raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(__lowercase , __lowercase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
__a = token_ids
__a = len(self.token_ids )
__a = -1 # the index of the currently fulfilled step
__a = False
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self : Any , __lowercase : int ):
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(__lowercase )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self : List[str] , __lowercase : int ):
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(__lowercase )}" )
__a = False
__a = False
__a = False
if self.does_advance(__lowercase ):
self.fulfilled_idx += 1
__a = True
if self.fulfilled_idx == (self.seqlen - 1):
__a = True
__a = completed
else:
# failed to make progress.
__a = True
self.reset()
return stepped, completed, reset
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = False
__a = 0
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCamelCase_ ( self : str , __lowercase : str=False ):
'''simple docstring'''
__a = PhrasalConstraint(self.token_ids )
if stateful:
__a = self.seqlen
__a = self.fulfilled_idx
__a = self.completed
return new_constraint
class SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , __lowercase : List[List[int]] , __lowercase : List[Any]=True ):
'''simple docstring'''
__a = max([len(__lowercase ) for one in nested_token_ids] )
__a = {}
for token_ids in nested_token_ids:
__a = root
for tidx, token_id in enumerate(__lowercase ):
if token_id not in level:
__a = {}
__a = level[token_id]
if no_subsets and self.has_subsets(__lowercase , __lowercase ):
raise ValueError(
"""Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
F" {nested_token_ids}." )
__a = root
def UpperCamelCase_ ( self : str , __lowercase : Tuple ):
'''simple docstring'''
__a = self.trie
for current_token in current_seq:
__a = start[current_token]
__a = list(start.keys() )
return next_tokens
def UpperCamelCase_ ( self : List[Any] , __lowercase : Dict ):
'''simple docstring'''
__a = self.next_tokens(__lowercase )
return len(__lowercase ) == 0
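    # `count_leaves` below counts complete phrases under a trie node; `has_subsets`
    # flags the case where one phrase is a strict prefix of another (then the leaf
    # count is smaller than the number of phrases, which __init__ rejects).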
def UpperCamelCase_ ( self : Optional[int] , __lowercase : Any ):
'''simple docstring'''
__a = list(root.values() )
if len(__lowercase ) == 0:
return 1
else:
return sum([self.count_leaves(__lowercase ) for nn in next_nodes] )
def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : int ):
'''simple docstring'''
__a = self.count_leaves(__lowercase )
return len(__lowercase ) != leaf_count
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Union[str, Any] , __lowercase : List[List[int]] ):
'''simple docstring'''
super(__lowercase , self ).__init__()
if not isinstance(__lowercase , __lowercase ) or len(__lowercase ) == 0:
raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(__lowercase , __lowercase ) for token_ids in nested_token_ids ):
raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(__lowercase , __lowercase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
__a = DisjunctiveTrie(__lowercase )
__a = nested_token_ids
__a = self.trie.max_height
__a = []
__a = False
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.trie.next_tokens(self.current_seq )
if len(__lowercase ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self : Any , __lowercase : int ):
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowercase )}" )
__a = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCamelCase_ ( self : Any , __lowercase : int ):
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowercase )}" )
__a = False
__a = False
__a = False
if self.does_advance(__lowercase ):
self.current_seq.append(__lowercase )
__a = True
else:
__a = True
self.reset()
__a = self.trie.reached_leaf(self.current_seq )
__a = completed
return stepped, completed, reset
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = False
__a = []
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[Any]=False ):
'''simple docstring'''
__a = DisjunctiveConstraint(self.token_ids )
if stateful:
__a = self.seqlen
__a = self.current_seq
__a = self.completed
return new_constraint
class SCREAMING_SNAKE_CASE :
def __init__( self : Any , __lowercase : List[Constraint] ):
'''simple docstring'''
__a = constraints
# max # of steps required to fulfill a given constraint
__a = max([c.seqlen for c in constraints] )
__a = len(__lowercase )
__a = False
self.init_state()
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = []
__a = None
__a = [constraint.copy(stateful=__lowercase ) for constraint in self.constraints]
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__a = constraint.advance()
if isinstance(__lowercase , __lowercase ):
token_list.append(__lowercase )
elif isinstance(__lowercase , __lowercase ):
token_list.extend(__lowercase )
else:
__a = self.inprogress_constraint.advance()
if isinstance(__lowercase , __lowercase ):
token_list.append(__lowercase )
elif isinstance(__lowercase , __lowercase ):
token_list.extend(__lowercase )
if len(__lowercase ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self : Optional[int] , __lowercase : Optional[List[int]] ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__a , __a = self.add(__lowercase )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCamelCase_ ( self : int , __lowercase : int ):
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`." )
__a , __a = False, False
if self.completed:
__a = True
__a = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
__a , __a , __a = self.inprogress_constraint.update(__lowercase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we call self.init_state(), since we only reset the state for this
                # particular constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__lowercase ) )
__a = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__a = None
if len(self.pending_constraints ) == 0:
# we're done!
__a = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward any of the
            # constraints in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(__lowercase ):
__a , __a , __a = pending_constraint.update(__lowercase )
if not stepped:
raise Exception(
"""`constraint.update(token_id)` is not yielding incremental progress, """
"""even though `constraint.does_advance(token_id)` is true.""" )
if complete:
self.complete_constraints.append(__lowercase )
__a = None
if not complete and stepped:
__a = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__a = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__a = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCamelCase_ ( self : Tuple , __lowercase : Union[str, Any]=True ):
'''simple docstring'''
        __a = ConstraintListState(self.constraints )  # we actually never touch the self.constraints objects
        # throughout this process, so the copy starts from the initialization state.
if stateful:
__a = [
constraint.copy(stateful=__lowercase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__a = self.inprogress_constraint.copy(stateful=__lowercase )
__a = [constraint.copy() for constraint in self.pending_constraints]
return new_state
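# A hedged sketch of how these constraint classes are consumed in practice:
# constrained beam search via `model.generate(..., constraints=[...])`. Token
# ids are illustrative; real ids come from a tokenizer.
from transformers import DisjunctiveConstraint, PhrasalConstraint

must_contain = PhrasalConstraint([5, 6, 7])        # force the exact phrase 5 6 7
either_or = DisjunctiveConstraint([[8, 9], [10]])  # force one of two phrases
# outputs = model.generate(input_ids, constraints=[must_contain, either_or], num_beams=4)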
| 302
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Optional[int] , **__lowercase : Dict ):
'''simple docstring'''
super().__init__(**__lowercase )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self : str , __lowercase : Union[np.ndarray, bytes, str] , **__lowercase : int ):
'''simple docstring'''
return super().__call__(__lowercase , **__lowercase )
def UpperCamelCase_ ( self : List[Any] , **__lowercase : Union[str, Any] ):
'''simple docstring'''
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__a = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCamelCase_ ( self : int , __lowercase : Dict , __lowercase : Dict=None , __lowercase : str="This is a sound of {}." ):
'''simple docstring'''
if isinstance(__lowercase , __lowercase ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(__lowercase ).content
else:
with open(__lowercase , """rb""" ) as f:
__a = f.read()
if isinstance(__lowercase , __lowercase ):
__a = ffmpeg_read(__lowercase , self.feature_extractor.sampling_rate )
if not isinstance(__lowercase , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
__a = candidate_labels
__a = [hypothesis_template.format(__lowercase ) for x in candidate_labels]
__a = self.tokenizer(__lowercase , return_tensors=self.framework , padding=__lowercase )
__a = [text_inputs]
return inputs
def UpperCamelCase_ ( self : Any , __lowercase : Any ):
'''simple docstring'''
__a = model_inputs.pop("""candidate_labels""" )
__a = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , __lowercase ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**__lowercase , **__lowercase )
__a = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Dict ):
'''simple docstring'''
__a = model_outputs.pop("""candidate_labels""" )
__a = model_outputs["""logits"""][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
__a = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(__lowercase , __lowercase ) , key=lambda x : -x[0] )
]
return result
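# A usage sketch for this pipeline via the factory function; the CLAP checkpoint,
# audio file, and candidate labels are assumptions for illustration.
from transformers import pipeline

classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
result = classifier(
    "dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"]
)
# -> [{"score": ..., "label": "Sound of a dog"}, ...] sorted by descending score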
| 302
| 1
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCamelCase__ = """"""
lowerCamelCase__ = """"""
lowerCamelCase__ = """"""
lowerCamelCase__ = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a , __a = get_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print("""Processing...""" )
__a , __a , __a = update_image_and_anno(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for index, image in enumerate(_SCREAMING_SNAKE_CASE ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__a = random_chars(32 )
__a = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
__a = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(f"/{file_root}.jpg" , _SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"Success {index+1}/{len(_SCREAMING_SNAKE_CASE )} with {file_name}" )
__a = []
for anno in new_annos[index]:
__a = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(_SCREAMING_SNAKE_CASE )
with open(f"/{file_root}.txt" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = []
__a = []
for label_file in glob.glob(os.path.join(_SCREAMING_SNAKE_CASE , """*.txt""" ) ):
__a = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(_SCREAMING_SNAKE_CASE ) as in_file:
__a = in_file.readlines()
__a = os.path.join(_SCREAMING_SNAKE_CASE , f"{label_name}.jpg" )
__a = []
for obj_list in obj_lists:
__a = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_SCREAMING_SNAKE_CASE )
labels.append(_SCREAMING_SNAKE_CASE )
return img_paths, labels
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 1 ):
"""simple docstring"""
__a = []
__a = []
__a = []
for idx in range(len(_SCREAMING_SNAKE_CASE ) ):
__a = []
__a = img_list[idx]
path_list.append(_SCREAMING_SNAKE_CASE )
__a = anno_list[idx]
__a = cva.imread(_SCREAMING_SNAKE_CASE )
if flip_type == 1:
__a = cva.flip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for bbox in img_annos:
__a = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__a = cva.flip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for bbox in img_annos:
__a = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_SCREAMING_SNAKE_CASE )
new_imgs_list.append(_SCREAMING_SNAKE_CASE )
return new_imgs_list, new_annos_lists, path_list
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 32 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
__a = ascii_lowercase + digits
return "".join(random.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 302
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Dict =['pixel_values']
def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Dict , ):
'''simple docstring'''
super().__init__(**__lowercase )
__a = size if size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase )
__a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase , default_to_square=__lowercase , param_name="""crop_size""" )
__a = do_resize
__a = do_rescale
__a = do_normalize
__a = do_center_crop
__a = crop_size
__a = size
__a = resample
__a = rescale_factor
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "shortest_edge" in size:
__a = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__a = (size["""height"""], size["""width"""])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : str , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str ):
'''simple docstring'''
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ):
'''simple docstring'''
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Tuple , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : int = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = do_resize if do_resize is not None else self.do_resize
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(__lowercase , param_name="""crop_size""" , default_to_square=__lowercase )
__a = resample if resample is not None else self.resample
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(__lowercase )
if not is_batched(__lowercase ):
__a = [images]
if not valid_images(__lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__a = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
__a = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
__a = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
__a = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
__a = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__a = {"""pixel_values""": images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
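# A minimal usage sketch for the processor above. Upstream it matches an
# EfficientFormer-style image processor - treat that identification, the class
# name, and the method names keeping their upstream spelling as assumptions:
#
#     import numpy as np
#     processor = EfficientFormerImageProcessor()  # hypothetical upstream name
#     dummy = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
#     batch = processor(dummy, return_tensors="np")
#     batch["pixel_values"].shape                  # (1, 3, 224, 224)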
| 302
| 1
|
from __future__ import annotations
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int | float | str , _SCREAMING_SNAKE_CASE : int | float | str ):
"""simple docstring"""
if nth_term == "":
return [""]
__a = int(_SCREAMING_SNAKE_CASE )
__a = int(_SCREAMING_SNAKE_CASE )
__a = []
for temp in range(int(_SCREAMING_SNAKE_CASE ) ):
series.append(f"1 / {pow(temp + 1 , int(_SCREAMING_SNAKE_CASE ) )}" if series else """1""" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = int(input("""Enter the last number (nth term) of the P-Series"""))
lowerCamelCase__ = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
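# Worked example of the series builder above (the __main__ block assumes the
# upstream name `p_series`): p_series(5, 2) builds 1 + 1/2^2 + ... + 1/5^2 as
# strings:
#
#     p_series(5, 2)  ->  ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']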
| 302
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = """hf-internal-testing/tiny-random-t5"""
__a = AutoTokenizer.from_pretrained(__lowercase )
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
__a = tokenizer("""This is me""" , return_tensors="""pt""" )
__a = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__a = model.generate(**__lowercase )
__a = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__a = model_reloaded.generate(**__lowercase )
self.assertTrue(torch.allclose(__lowercase , __lowercase ) )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = """hf-internal-testing/tiny-random-t5"""
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
__a = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowercase ):
model.save_pretrained(__lowercase )
__a = model.reverse_bettertransformer()
model.save_pretrained(__lowercase )
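# The round-trip these tests verify, as a hedged sketch; requires `optimum` to be
# installed, and the tiny checkpoint id mirrors the one used above.
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()       # swap attention blocks for fused kernels
model = model.reverse_bettertransformer()  # required before save_pretrained
model.save_pretrained("tiny-t5-roundtrip")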
| 302
| 1
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ):
@register_to_config
def __init__( self : Dict , __lowercase : int = 128 , __lowercase : int = 256 , __lowercase : float = 2000.0 , __lowercase : int = 768 , __lowercase : int = 12 , __lowercase : int = 12 , __lowercase : int = 64 , __lowercase : int = 2048 , __lowercase : float = 0.1 , ):
'''simple docstring'''
super().__init__()
__a = nn.Sequential(
nn.Linear(__lowercase , d_model * 4 , bias=__lowercase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowercase ) , nn.SiLU() , )
__a = nn.Embedding(__lowercase , __lowercase )
__a = False
__a = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
__a = nn.Dropout(p=__lowercase )
__a = nn.ModuleList()
for lyr_num in range(__lowercase ):
# FiLM conditional T5 decoder
__a = DecoderLayer(d_model=__lowercase , d_kv=__lowercase , num_heads=__lowercase , d_ff=__lowercase , dropout_rate=__lowercase )
self.decoders.append(__lowercase )
__a = TaLayerNorm(__lowercase )
__a = nn.Dropout(p=__lowercase )
__a = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
def UpperCamelCase_ ( self : str , __lowercase : Union[str, Any] , __lowercase : Optional[Any] ):
'''simple docstring'''
__a = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Optional[int] ):
'''simple docstring'''
__a , __a , __a = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__a = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
__a = self.conditioning_emb(__lowercase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__a = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__a = torch.broadcast_to(
torch.arange(__lowercase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
__a = self.position_encoding(__lowercase )
__a = self.continuous_inputs_projection(__lowercase )
inputs += position_encodings
__a = self.dropout(__lowercase )
# decoder: No padding present.
__a = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__a = [(x, self.encoder_decoder_mask(__lowercase , __lowercase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__a = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
__a = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
__a = lyr(
__lowercase , conditioning_emb=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )[0]
__a = self.decoder_norm(__lowercase )
__a = self.post_dropout(__lowercase )
__a = self.spec_out(__lowercase )
return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        '''simple docstring'''
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            ))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        '''simple docstring'''
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        '''simple docstring'''
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        '''simple docstring'''
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states

class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        '''simple docstring'''
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        '''simple docstring'''
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output

class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        '''simple docstring'''
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        '''simple docstring'''
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        '''simple docstring'''
        super().__init__()
        # Two distinct input projections are required for the gated activation:
        # one is passed through the GELU gate, the other stays linear.
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        '''simple docstring'''
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        '''simple docstring'''
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        '''simple docstring'''
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
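# Hedged numeric sketch of the RMS normalization above (illustration only, not
# part of the original module): the variance is the mean of squares, with no
# mean subtraction and no bias.
_rms_x = torch.tensor([[3.0, 4.0]])
_rms_var = _rms_x.pow(2).mean(-1, keepdim=True)  # (9 + 16) / 2 = 12.5
_rms_out = _rms_x * torch.rsqrt(_rms_var)        # ~[[0.8485, 1.1314]]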
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        '''simple docstring'''
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
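# Hedged check (illustration only, not part of the original module): the tanh
# formula above is the standard approximation of the exact erf-based GELU; on
# [-3, 3] the gap stays well below 1e-2.
_gelu_x = torch.linspace(-3.0, 3.0, steps=61)
_gelu_tanh = 0.5 * _gelu_x * (
    1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (_gelu_x + 0.044715 * torch.pow(_gelu_x, 3.0)))
)
_gelu_exact = 0.5 * _gelu_x * (1.0 + torch.erf(_gelu_x / math.sqrt(2.0)))
assert torch.allclose(_gelu_tanh, _gelu_exact, atol=1e-2)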
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        '''simple docstring'''
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        '''simple docstring'''
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
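# Hedged illustration (not part of the original module): FiLM is a learned
# feature-wise affine transform. A conditioning embedding is projected to a
# per-feature scale and shift, applied as x * (1 + scale) + shift, so a zero
# projection leaves the features unchanged.
_film = TaFiLMLayer(16, 4)
_film_x = torch.randn(2, 10, 4)     # (batch, seq, features)
_film_cond = torch.randn(2, 1, 16)  # conditioning embedding, broadcast over seq
assert _film(_film_x, _film_cond).shape == _film_x.shape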
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type

class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
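# Hedged usage sketch (illustration only): the configuration accepts keyword
# overrides of any of the defaults above, e.g.
#     config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)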
def get_demo_graph(index: int):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """simple docstring"""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
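    # Hedged usage sketch: in the first demo graph, removing edge (2, 3),
    # (3, 4) or (2, 5) disconnects it, so exactly those edges are bridges.
    print(compute_bridges(get_demo_graph(0)))  # [(3, 4), (2, 3), (2, 5)] in DFS order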
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """simple docstring"""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )

def fraction_list(digit_len: int) -> list[str]:
    """simple docstring"""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions

def solution(n: int = 2) -> int:
    """simple docstring"""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
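    # Hedged check: the four non-trivial digit-cancelling fractions are
    # 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100.
    assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]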
class Node:
    def __init__(self, val):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val

def inorder(root, res):
    """simple docstring"""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)

def tree_sort(arr):
    """simple docstring"""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
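    # The in-order traversal of the BST yields the sorted list:
    # [1, 2, 3, 9, 10, 13, 14]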
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase__ = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        '''simple docstring'''
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        '''simple docstring'''
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        '''simple docstring'''
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        '''simple docstring'''
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        '''simple docstring'''
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        '''simple docstring'''
        pass

    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        '''simple docstring'''
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        '''simple docstring'''
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)

def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """simple docstring"""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())

LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    """simple docstring"""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}")
        return list(range(n_student))
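# Hedged usage sketch (illustration only): a 4-layer student distilled from a
# 12-layer teacher keeps the first and last teacher layers plus two evenly
# spaced ones:
#     pick_layers_to_copy(n_student=4, n_teacher=12)  # -> [0, 4, 8, 11]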
def get_layers_to_supervise(n_student, n_teacher):
    """simple docstring"""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    """simple docstring"""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeqaSeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}")
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}")
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCamelCase__ = """src/transformers"""
# Matches is_xxx_available()
lowerCamelCase__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCamelCase__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCamelCase__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCamelCase__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCamelCase__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
lowerCamelCase__ = re.compile(r"""^\s*else:""")
def find_backend(line):
    """simple docstring"""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """simple docstring"""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """simple docstring"""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules

IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """simple docstring"""
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """simple docstring"""
    return b if a == 0 else greatest_common_divisor(b % a, a)

class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)
    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        '''simple docstring'''
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        '''simple docstring'''
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        '''simple docstring'''
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        '''simple docstring'''
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        '''simple docstring'''
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        '''simple docstring'''
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
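# Hedged usage sketch (values taken from the reference implementation's
# doctests; illustration only):
#     hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     hc.encrypt("testing hill cipher")  # -> "WHXYJOLM9C6XT085LL"
#     hc.decrypt("WHXYJOLM9C6XT085LL")   # -> "TESTINGHILLCIPHERR"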
def main() -> None:
    """simple docstring"""
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """simple docstring"""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
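# Hedged examples (illustration only), tracing the replacements above:
#     rename_key("patch_embed.proj.weight")
#     # -> "focalnet.embeddings.patch_embeddings.projection.weight"
#     rename_key("head.weight")
#     # -> "classifier.weight"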
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
lowerCamelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
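# Example invocation (hypothetical file name -- the script's own name does not
# appear in this snippet):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny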
| 302
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] ='autoformer'
__lowerCamelCase : str ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : List[Any] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase : bool = True , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 100 , __lowercase : float = 0.02 , __lowercase : bool = True , __lowercase : List[Any]=True , __lowercase : int = 10 , __lowercase : int = 25 , __lowercase : int = 3 , **__lowercase : Optional[int] , ):
'''simple docstring'''
# time series specific configuration
__a = prediction_length
__a = context_length if context_length is not None else prediction_length
__a = distribution_output
__a = loss
__a = input_size
__a = num_time_features
__a = lags_sequence
__a = scaling
__a = num_dynamic_real_features
__a = num_static_real_features
__a = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
__a = cardinality
else:
__a = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
__a = embedding_dimension
else:
__a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__a = num_parallel_samples
# Transformer architecture configuration
__a = input_size * len(self.lags_sequence ) + self._number_of_features
__a = d_model
__a = encoder_attention_heads
__a = decoder_attention_heads
__a = encoder_ffn_dim
__a = decoder_ffn_dim
__a = encoder_layers
__a = decoder_layers
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = activation_function
__a = init_std
__a = use_cache
# Autoformer
__a = label_length
__a = moving_average
__a = autocorrelation_factor
super().__init__(is_encoder_decoder=__lowercase , **__lowercase )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
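# Worked example with the defaults above: input_size=1, num_time_features=0,
# lags_sequence=[1, 2, 3, 4, 5, 6, 7] and no static or dynamic features, so
# cardinality=[0] and embedding_dimension=[min(50, (0 + 1) // 2)]=[0]. The
# property then yields 0 + 0 + 0 + 0 + 1 * 2 = 2, and
# feature_size = 1 * len(lags_sequence) + 2 = 9.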
| 302
| 1
|
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(_SCREAMING_SNAKE_CASE ) )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if index == len(_SCREAMING_SNAKE_CASE ):
return True
# Recursive Step
for i in range(_SCREAMING_SNAKE_CASE ):
if valid_coloring(graph[index] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Color current vertex
__a = i
# Validate coloring
if util_color(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 ):
return True
# Backtrack
__a = -1
return False
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = [-1] * len(_SCREAMING_SNAKE_CASE )
if util_color(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 ):
return colored_vertices
return []
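# Usage sketch for the backtracking m-coloring above. Assumption: the three
# functions are exposed as valid_coloring, util_color and color -- the names
# their own bodies call; the dataset-mangled names above stand in for them.
#
# triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
# color(triangle, 3) # -> a proper 3-coloring such as [0, 1, 2]
# color(triangle, 2) # -> [] (a triangle cannot be 2-colored)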
| 302
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
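# A generic sketch of the lazy-import idea used above, written with PEP 562
# module-level __getattr__ rather than transformers' internal _LazyModule
# (assumption: this illustrates the pattern as it would appear in a package
# __init__.py; it is not the actual _LazyModule implementation).
import importlib

_demo_import_structure = {"tokenization_electra": ["ElectraTokenizer"]}
_demo_attr_to_module = {attr: mod for mod, attrs in _demo_import_structure.items() for attr in attrs}

def __getattr__(name):  # invoked only when `name` is not found the normal way
    if name in _demo_attr_to_module:
        _demo_module = importlib.import_module("." + _demo_attr_to_module[name], __name__)
        return getattr(_demo_module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")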
| 302
| 1
|
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCamelCase__ = Mapping[str, np.ndarray]
lowerCamelCase__ = Mapping[str, Any] # Is a nested dict.
lowerCamelCase__ = 0.01
@dataclasses.dataclass(frozen=lowerCamelCase__ )
class SCREAMING_SNAKE_CASE :
__lowerCamelCase : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
__lowerCamelCase : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
__lowerCamelCase : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
__lowerCamelCase : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
__lowerCamelCase : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
__lowerCamelCase : Optional[np.ndarray] =None
# Optional remark about the protein. Included as a comment in output PDB
# files
__lowerCamelCase : Optional[str] =None
# Templates used to generate this protein (prediction-only)
__lowerCamelCase : Optional[Sequence[str]] =None
# Chain corresponding to each parent
__lowerCamelCase : Optional[Sequence[int]] =None
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = r"""(\[[A-Z]+\]\n)"""
__a = [tag.strip() for tag in re.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0]
__a = zip(tags[0::2] , [l.split("""\n""" ) for l in tags[1::2]] )
__a = ["N", "CA", "C"]
__a = None
__a = None
__a = None
for g in groups:
if "[PRIMARY]" == g[0]:
__a = g[1][0].strip()
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if seq[i] not in residue_constants.restypes:
__a = """X""" # FIXME: strings are immutable
__a = np.array(
[residue_constants.restype_order.get(_SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__a = []
for axis in range(3 ):
tertiary.append(list(map(_SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) )
__a = np.array(_SCREAMING_SNAKE_CASE )
__a = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
__a = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__a = np.array(list(map({"""-""": 0, """+""": 1}.get , g[1][0].strip() ) ) )
__a = np.zeros(
(
len(_SCREAMING_SNAKE_CASE ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
__a = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_SCREAMING_SNAKE_CASE , atom_mask=_SCREAMING_SNAKE_CASE , aatype=_SCREAMING_SNAKE_CASE , residue_index=np.arange(len(_SCREAMING_SNAKE_CASE ) ) , b_factors=_SCREAMING_SNAKE_CASE , )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Protein , _SCREAMING_SNAKE_CASE : int = 0 ):
"""simple docstring"""
__a = []
__a = prot.remark
if remark is not None:
pdb_headers.append(f"REMARK {remark}" )
__a = prot.parents
__a = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__a = [p for i, p in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i == chain_id]
if parents is None or len(_SCREAMING_SNAKE_CASE ) == 0:
__a = ["""N/A"""]
pdb_headers.append(f"PARENT {' '.join(_SCREAMING_SNAKE_CASE )}" )
return pdb_headers
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Protein , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = []
__a = pdb_str.split("""\n""" )
__a = prot.remark
if remark is not None:
out_pdb_lines.append(f"REMARK {remark}" )
__a = 42
if prot.parents is not None and len(prot.parents ) > 0:
__a = []
if prot.parents_chain_index is not None:
__a = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_SCREAMING_SNAKE_CASE ) , [] )
parent_dict[str(_SCREAMING_SNAKE_CASE )].append(_SCREAMING_SNAKE_CASE )
__a = max([int(_SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__a = parent_dict.get(str(_SCREAMING_SNAKE_CASE ) , ["""N/A"""] )
parents_per_chain.append(_SCREAMING_SNAKE_CASE )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__a = [["""N/A"""]]
def make_parent_line(_SCREAMING_SNAKE_CASE : Sequence[str] ) -> str:
return f"PARENT {' '.join(_SCREAMING_SNAKE_CASE )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__a = 0
for i, l in enumerate(_SCREAMING_SNAKE_CASE ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_SCREAMING_SNAKE_CASE )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_SCREAMING_SNAKE_CASE ):
__a = parents_per_chain[chain_counter]
else:
__a = ["""N/A"""]
out_pdb_lines.append(make_parent_line(_SCREAMING_SNAKE_CASE ) )
return "\n".join(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Protein ):
"""simple docstring"""
__a = residue_constants.restypes + ["""X"""]
def res_atoa(_SCREAMING_SNAKE_CASE : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , """UNK""" )
__a = residue_constants.atom_types
__a = []
__a = prot.atom_mask
__a = prot.aatype
__a = prot.atom_positions
__a = prot.residue_index.astype(np.intaa )
__a = prot.b_factors
__a = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("""Invalid aatypes.""" )
__a = get_pdb_headers(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
pdb_lines.extend(_SCREAMING_SNAKE_CASE )
__a = aatype.shape[0]
__a = 1
__a = 0
__a = string.ascii_uppercase
__a = None
# Add all atom sites.
for i in range(_SCREAMING_SNAKE_CASE ):
__a = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__a = """ATOM"""
__a = atom_name if len(_SCREAMING_SNAKE_CASE ) == 4 else f" {atom_name}"
__a = """"""
__a = """"""
__a = 1.00
__a = atom_name[0] # Protein supports only C, N, O, S, this works.
__a = """"""
__a = """A"""
if chain_index is not None:
__a = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__a = (
f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
f"{res_name_a:>3} {chain_tag:>1}"
f"{residue_index[i]:>4}{insertion_code:>1} "
f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
f"{occupancy:>6.2f}{b_factor:>6.2f} "
f"{element:>2}{charge:>2}"
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
__a = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__a = True
__a = chain_index[i + 1]
if should_terminate:
# Close the chain.
__a = """TER"""
__a = (
f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
pdb_lines.append("""END""" )
pdb_lines.append("""""" )
return "\n".join(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Protein ):
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : FeatureDict , _SCREAMING_SNAKE_CASE : ModelOutput , _SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None , _SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[Sequence[str]] = None , _SCREAMING_SNAKE_CASE : Optional[Sequence[int]] = None , ):
"""simple docstring"""
return Protein(
aatype=features["""aatype"""] , atom_positions=result["""final_atom_positions"""] , atom_mask=result["""final_atom_mask"""] , residue_index=features["""residue_index"""] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ) , chain_index=_SCREAMING_SNAKE_CASE , remark=_SCREAMING_SNAKE_CASE , parents=_SCREAMING_SNAKE_CASE , parents_chain_index=_SCREAMING_SNAKE_CASE , )
| 302
|
from __future__ import annotations
lowerCamelCase__ = """#"""
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ):
'''simple docstring'''
__a = {}
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in text:
if char not in trie:
__a = {}
__a = trie[char]
__a = True
def UpperCamelCase_ ( self : Tuple , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in prefix:
if char in trie:
__a = trie[char]
else:
return []
return self._elements(__lowercase )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : dict ):
'''simple docstring'''
__a = []
for c, v in d.items():
__a = [""" """] if c == END else [(c + s) for s in self._elements(__lowercase )]
result.extend(__lowercase )
return tuple(__lowercase )
lowerCamelCase__ = Trie()
lowerCamelCase__ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = trie.find_word(_SCREAMING_SNAKE_CASE )
return tuple(string + word for word in suffixes )
def lowerCAmelCase__ ( ):
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
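# Expected output sketch: with the six words inserted above, the prefix "de"
# completes to ('depart ', 'detergent ', 'deer ', 'deal '). Each entry keeps a
# trailing space because a word end is stored under END and mapped to " ", and
# the order follows dict insertion order (guaranteed on Python 3.7+).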
| 302
| 1
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : str =None
__lowerCamelCase : Optional[int] =BloomTokenizerFast
__lowerCamelCase : str =BloomTokenizerFast
__lowerCamelCase : Optional[int] =True
__lowerCamelCase : List[Any] =False
__lowerCamelCase : List[str] ='tokenizer_file'
__lowerCamelCase : List[str] ={'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
super().setUp()
__a = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Optional[Any] , **__lowercase : Tuple ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.get_rust_tokenizer()
__a = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
__a = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
__a = tokenizer.batch_encode_plus(__lowercase )["""input_ids"""]
self.assertListEqual(__lowercase , __lowercase )
__a = tokenizer.batch_decode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : List[str]=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__a = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__a = """This is a simple input"""
__a = ["""This is a simple input 1""", """This is a simple input 2"""]
__a = ("""This is a simple input""", """This is a pair""")
__a = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(__lowercase , max_length=__lowercase )
tokenizer_r.encode_plus(__lowercase , max_length=__lowercase )
tokenizer_r.batch_encode_plus(__lowercase , max_length=__lowercase )
tokenizer_r.encode(__lowercase , max_length=__lowercase )
tokenizer_r.batch_encode_plus(__lowercase , max_length=__lowercase )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
__a = None # Hotfixing padding = None
self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Simple input
self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Simple input
self.assertRaises(
__lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" , )
# Pair input
self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Pair input
self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" )
# Pair input
self.assertRaises(
__lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding="""max_length""" , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = self.get_rust_tokenizer()
__a = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=__lowercase )
__a = next(iter(__lowercase ) )["""premise"""] # pick one sample from the dataset
__a = list(sample_data.values() )
__a = list(map(tokenizer.encode , __lowercase ) )
__a = [tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase ) for x in output_tokens]
self.assertListEqual(__lowercase , __lowercase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
# The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not impose
# any sequence length constraints. The parent class's test would fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 302
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : torch.FloatTensor
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ):
@register_to_config
def __init__( self : Dict , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Any=77 , __lowercase : Optional[int]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
__a = num_attention_heads
__a = attention_head_dim
__a = num_attention_heads * attention_head_dim
__a = additional_embeddings
__a = time_embed_dim or inner_dim
__a = embedding_proj_dim or embedding_dim
__a = clip_embed_dim or embedding_dim
__a = Timesteps(__lowercase , __lowercase , 0 )
__a = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase )
__a = nn.Linear(__lowercase , __lowercase )
if embedding_proj_norm_type is None:
__a = None
elif embedding_proj_norm_type == "layer":
__a = nn.LayerNorm(__lowercase )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
__a = nn.Linear(__lowercase , __lowercase )
if encoder_hid_proj_type is None:
__a = None
elif encoder_hid_proj_type == "linear":
__a = nn.Linear(__lowercase , __lowercase )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
__a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) )
if added_emb_type == "prd":
__a = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) )
elif added_emb_type is None:
__a = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
__a = nn.ModuleList(
[
BasicTransformerBlock(
__lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn="""gelu""" , attention_bias=__lowercase , )
for d in range(__lowercase )
] )
if norm_in_type == "layer":
__a = nn.LayerNorm(__lowercase )
elif norm_in_type is None:
__a = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
__a = nn.LayerNorm(__lowercase )
__a = nn.Linear(__lowercase , __lowercase )
__a = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
__a = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , __lowercase , persistent=__lowercase )
__a = nn.Parameter(torch.zeros(1 , __lowercase ) )
__a = nn.Parameter(torch.zeros(1 , __lowercase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = {}
def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ):
if hasattr(__lowercase , """set_processor""" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__lowercase , __lowercase , __lowercase )
return processors
def UpperCamelCase_ ( self : List[str] , __lowercase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
__a = len(self.attn_processors.keys() )
if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict ):
if hasattr(__lowercase , """set_processor""" ):
if not isinstance(__lowercase , __lowercase ):
module.set_processor(__lowercase )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , __lowercase , __lowercase )
for name, module in self.named_children():
fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.FloatTensor , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.BoolTensor] = None , __lowercase : bool = True , ):
'''simple docstring'''
__a = hidden_states.shape[0]
__a = timestep
if not torch.is_tensor(__lowercase ):
__a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0:
__a = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__a = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device )
__a = self.time_proj(__lowercase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__a = timesteps_projected.to(dtype=self.dtype )
__a = self.time_embedding(__lowercase )
if self.embedding_proj_norm is not None:
__a = self.embedding_proj_norm(__lowercase )
__a = self.embedding_proj(__lowercase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__a = self.encoder_hidden_states_proj(__lowercase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
__a = self.proj_in(__lowercase )
__a = self.positional_embedding.to(hidden_states.dtype )
__a = []
__a = 0
if encoder_hidden_states is not None:
additional_embeds.append(__lowercase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__a = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__a = hidden_states[:, None, :]
__a = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__a = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 )
additional_embeds.append(__lowercase )
__a = torch.cat(
__lowercase , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__a = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__a = F.pad(
__lowercase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__a = hidden_states + positional_embeddings
if attention_mask is not None:
__a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
__a = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 )
__a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__a = self.norm_in(__lowercase )
for block in self.transformer_blocks:
__a = block(__lowercase , attention_mask=__lowercase )
__a = self.norm_out(__lowercase )
if self.prd_embedding is not None:
__a = hidden_states[:, -1]
else:
__a = hidden_states[:, additional_embeddings_len:]
__a = self.proj_to_clip_embeddings(__lowercase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : Tuple ):
'''simple docstring'''
__a = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
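# Standalone sketch of the additive causal mask registered in __init__ above:
# torch.full fills an (n, n) matrix with -10000.0, and triu_(1) zeroes the
# diagonal and everything below it, so once the mask is added to the attention
# scores, token i can only attend to tokens at positions <= i.
import torch

_n = 4
_demo_mask = torch.full([_n, _n], -10000.0)
_demo_mask.triu_(1)
# _demo_mask[0] is tensor([0., -10000., -10000., -10000.])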
| 302
| 1
|
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ):
"""simple docstring"""
__a , __a = len(_SCREAMING_SNAKE_CASE ), len(grid[0] )
if (
min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__a = 0
count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
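# Usage sketch. Assumption: the function above is exposed as
# depth_first_search, the name its own recursive calls use.
#
# grid = [[0, 0], [0, 0]]
# depth_first_search(grid, 0, 0, set()) # -> 2 (right-then-down and down-then-right)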
| 302
|
from functools import lru_cache
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = 2
__a = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_SCREAMING_SNAKE_CASE )
if n > 1:
factors.add(_SCREAMING_SNAKE_CASE )
return factors
@lru_cache
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return len(unique_prime_factors(_SCREAMING_SNAKE_CASE ) )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list ):
"""simple docstring"""
return len(set(_SCREAMING_SNAKE_CASE ) ) in (0, 1)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = 2
while True:
# Increment each value of a generated range
__a = [base + i for i in range(_SCREAMING_SNAKE_CASE )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
__a = [upf_len(_SCREAMING_SNAKE_CASE ) for x in group]
checker.append(_SCREAMING_SNAKE_CASE )
# If all numbers in the list are equal, return the group variable.
if equality(_SCREAMING_SNAKE_CASE ):
return group
# Increment our base variable by 1
base += 1
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 4 ):
"""simple docstring"""
__a = run(_SCREAMING_SNAKE_CASE )
return results[0] if len(_SCREAMING_SNAKE_CASE ) else None
if __name__ == "__main__":
print(solution())
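# Worked examples, assuming the helpers are exposed under the names their
# bodies reference and that the value appended to `checker` is the group size
# n (which the all-equal test then enforces): solution(2) -> 14, since
# 14 = 2*7 and 15 = 3*5 each have two distinct prime factors; solution(3) ->
# 644 (644 = 2^2*7*23, 645 = 3*5*43, 646 = 2*17*19); the default solution()
# gives the published Project Euler 47 answer, 134043.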
| 302
| 1
|
from math import factorial
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
__a = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
__a = float(factorial(_SCREAMING_SNAKE_CASE ) )
coefficient /= factorial(_SCREAMING_SNAKE_CASE ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
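# Worked check for the call above:
# binomial_distribution(2, 4, 0.75) = C(4, 2) * 0.75**2 * 0.25**2
#                                   = 6 * 0.5625 * 0.0625 = 0.2109375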
| 302
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
__a = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__a = 128
elif "12-12" in model_name:
__a = 12
__a = 12
elif "14-14" in model_name:
__a = 14
__a = 14
elif "16-16" in model_name:
__a = 16
__a = 16
else:
raise ValueError("""Model not supported""" )
__a = """huggingface/label-files"""
if "speech-commands" in model_name:
__a = 35
__a = """speech-commands-v2-id2label.json"""
else:
__a = 527
__a = """audioset-id2label.json"""
__a = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
__a = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if "module.v" in name:
__a = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
__a = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
__a = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
__a = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__a = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
__a = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__a = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__a = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__a = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__a = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__a = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__a = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__a = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
__a = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
__a = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__a = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "qkv" in key:
__a = key.split(""".""" )
__a = int(key_split[3] )
__a = config.hidden_size
if "weight" in key:
__a = val[:dim, :]
__a = val[dim : dim * 2, :]
__a = val[-dim:, :]
else:
__a = val[:dim]
__a = val[dim : dim * 2]
__a = val[-dim:]
else:
__a = val
return orig_state_dict
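# Standalone sketch of the fused-QKV split performed above: timm-style
# checkpoints store query, key and value as a single (3 * dim, dim) weight,
# which is sliced row-wise into three (dim, dim) blocks for the HF attention
# module (the bias is split the same way along its only dimension).
import torch

_dim = 8
_fused_qkv = torch.randn(3 * _dim, _dim)
_q = _fused_qkv[:_dim, :]
_k = _fused_qkv[_dim : 2 * _dim, :]
_v = _fused_qkv[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)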
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
__a = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@torch.no_grad()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=False ):
"""simple docstring"""
__a = get_audio_spectrogram_transformer_config(_SCREAMING_SNAKE_CASE )
__a = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
__a = model_name_to_url[model_name]
__a = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
# remove some keys
remove_keys(_SCREAMING_SNAKE_CASE )
# rename some keys
__a = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load 🤗 model
__a = ASTForAudioClassification(_SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
__a = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978
__a = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526
__a = 1024 if """speech-commands""" not in model_name else 128
__a = ASTFeatureExtractor(mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
if "speech-commands" in model_name:
__a = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
__a = dataset[0]["""audio"""]["""array"""]
else:
__a = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
__a , __a = torchaudio.load(_SCREAMING_SNAKE_CASE )
__a = waveform.squeeze().numpy()
__a = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=1_6000 , return_tensors="""pt""" )
# forward pass
__a = model(**_SCREAMING_SNAKE_CASE )
__a = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__a = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__a = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__a = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__a = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__a = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__a = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__a = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__a = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"Saving feature extractor to {pytorch_dump_folder_path}" )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f"MIT/{model_name}" )
feature_extractor.push_to_hub(f"MIT/{model_name}" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase__ = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 302
| 1
|
from __future__ import annotations
lowerCamelCase__ = """Muhammad Umer Farooq"""
lowerCamelCase__ = """MIT"""
lowerCamelCase__ = """1.0.0"""
lowerCamelCase__ = """Muhammad Umer Farooq"""
lowerCamelCase__ = """contact@muhammadumerfarooq.me"""
lowerCamelCase__ = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : str , __lowercase : str ):
'''simple docstring'''
super().__init__()
__a = []
__a = domain
def UpperCamelCase_ ( self : Optional[int] , __lowercase : str , __lowercase : list[tuple[str, str | None]] ):
'''simple docstring'''
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined and is neither empty nor just "#", record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__a = parse.urljoin(self.domain , __lowercase )
self.urls.append(__lowercase )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return ".".join(get_sub_domain_name(_SCREAMING_SNAKE_CASE ).split(""".""" )[-2:] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return parse.urlparse(_SCREAMING_SNAKE_CASE ).netloc
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "https://github.com" ):
"""simple docstring"""
__a = get_domain_name(_SCREAMING_SNAKE_CASE )
# Initialize the parser
__a = Parser(_SCREAMING_SNAKE_CASE )
try:
# Open URL
__a = requests.get(_SCREAMING_SNAKE_CASE )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__a = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__a = requests.get(_SCREAMING_SNAKE_CASE )
# Get the valid email.
__a = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(_SCREAMING_SNAKE_CASE )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCamelCase__ = emails_from_url("""https://github.com""")
print(F"""{len(emails)} emails found:""")
print("""\n""".join(sorted(emails)))
| 302
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =VOCAB_FILES_NAMES
__lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any =AlbertTokenizer
def __init__( self : Tuple , __lowercase : Union[str, Any]=None , __lowercase : Optional[int]=None , __lowercase : int=True , __lowercase : Dict=True , __lowercase : str=False , __lowercase : str="[CLS]" , __lowercase : List[Any]="[SEP]" , __lowercase : Any="<unk>" , __lowercase : List[Any]="[SEP]" , __lowercase : List[Any]="<pad>" , __lowercase : Optional[Any]="[CLS]" , __lowercase : List[str]="[MASK]" , **__lowercase : str , ):
'''simple docstring'''
# The mask token behaves like a normal word, i.e. it includes the space before it
# and is included in the raw text, so there should be a match in a non-normalized sentence.
__a = (
AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase )
if isinstance(__lowercase , __lowercase )
else mask_token
)
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = False if not self.vocab_file else True
def UpperCamelCase_ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : str , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Tuple , __lowercase : str , __lowercase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__a = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,)
| 302
| 1
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("""T""")
class SCREAMING_SNAKE_CASE ( Generic[T] ):
def __init__( self : List[str] , __lowercase : bool = True ):
'''simple docstring'''
__a = {} # dictionary of lists
__a = directed
def UpperCamelCase_ ( self : Dict , __lowercase : T , __lowercase : T ):
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__lowercase )
self.adj_list[destination_vertex].append(__lowercase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__lowercase )
__a = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the destination
# vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(__lowercase )
__a = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex; also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
else:
__a = [destination_vertex]
__a = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__lowercase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__lowercase )
__a = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
__a = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex.
else:
__a = [destination_vertex]
__a = []
return self
def __repr__( self : List[str] ):
'''simple docstring'''
return pformat(self.adj_list )
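# Usage sketch. Assumption: the class is exposed as GraphAdjacencyList and the
# edge method as add_edge; the dataset-mangled names above stand in for them.
#
# g = GraphAdjacencyList(directed=False)
# g.add_edge(1, 2).add_edge(2, 3) # add_edge returns self, so calls chain
# print(g) # {1: [2], 2: [1, 3], 3: [2]}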
| 302
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Optional[int] =(IPNDMScheduler,)
__lowerCamelCase : int =(('num_inference_steps', 50),)
def UpperCamelCase_ ( self : str , **__lowercase : Dict ):
'''simple docstring'''
__a = {"""num_train_timesteps""": 1000}
config.update(**__lowercase )
return config
def UpperCamelCase_ ( self : Any , __lowercase : Tuple=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
new_scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : str , __lowercase : int=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals (must be after setting timesteps)
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowercase )
                # copy over dummy past residuals (must be after setting timesteps)
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : List[str] , **__lowercase : Dict ):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
__a = 10
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(__lowercase )
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
return sample
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowercase , """set_timesteps""" ):
scheduler.set_timesteps(__lowercase )
elif num_inference_steps is not None and not hasattr(__lowercase , """set_timesteps""" ):
__a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__a = dummy_past_residuals[:]
__a = scheduler.timesteps[5]
__a = scheduler.timesteps[6]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__lowercase , time_step=__lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__lowercase , time_step=__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.full_loop()
__a = torch.mean(torch.abs(__lowercase ) )
assert abs(result_mean.item() - 2540529 ) < 10
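# A hedged usage sketch of the scheduler exercised above (the step count is
# arbitrary; `model_output` would come from a diffusion model's forward pass):
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(num_inference_steps=50)
#     for t in scheduler.timesteps:
#         sample = scheduler.step(model_output, t, sample).prev_sample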
| 302
| 1
|
def xnor_gate ( input_1 : int , input_2 : int ):
    """simple docstring"""
    return 1 if input_1 == input_2 else 0
def lowerCAmelCase__ ( ):
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
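# An equivalent bitwise formulation (a sketch, not part of the original module,
# and assuming single-bit 0/1 inputs): XNOR is the negation of XOR.
def xnor_gate_bitwise(input_1: int, input_2: int) -> int:
    # hypothetical alternative helper; matches xnor_gate on 0/1 inputs
    return int(not (input_1 ^ input_2))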
| 302
|
from __future__ import annotations
lowerCamelCase__ = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph :
    def __init__( self : Tuple , graph : dict[str, list[str]] , source_vertex : str ):
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex
    def breadth_first_search( self : Any ):
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self : Optional[int] , target_vertex : str ):
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F"->{target_vertex}"
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, """G""")
    g.breadth_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
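# Expected output, derived from the BFS parent pointers over the sample graph:
#   G->C->A->B->D
#   G
#   No path from vertex: G to vertex: Foo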
| 302
| 1
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("""T""")
class Node ( Generic[T] ):
    def __init__( self : List[str] , data : T ):
        '''simple docstring'''
        self.data = data
        self.next = None
def __str__( self : List[str] ):
'''simple docstring'''
return F"{self.data}"
class SCREAMING_SNAKE_CASE ( Generic[T] ):
def __init__( self : Union[str, Any] ):
'''simple docstring'''
        self.top = None
def __iter__( self : List[str] ):
'''simple docstring'''
        node = self.top
        while node:
            yield node.data
            node = node.next
def __str__( self : Optional[Any] ):
'''simple docstring'''
return "->".join([str(__lowercase ) for item in self] )
def __len__( self : Any ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
    def is_empty( self : Any ):
'''simple docstring'''
return self.top is None
    def push( self : Tuple , item : T ):
        '''simple docstring'''
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self : Optional[Any] ):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError("""pop from empty stack""" )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
return pop_node.data
    def peek( self : Optional[Any] ):
'''simple docstring'''
if self.is_empty():
raise IndexError("""peek from empty stack""" )
assert self.top is not None
return self.top.data
    def clear( self : Any ):
        '''simple docstring'''
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
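    # A short usage sketch of the generic linked stack defined above:
    #
    #     stack = SCREAMING_SNAKE_CASE[int]()
    #     stack.push(1)
    #     stack.push(2)
    #     print(stack)        # 2->1
    #     print(stack.peek()) # 2
    #     print(stack.pop())  # 2
    #     print(len(stack))   # 1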
| 302
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple =KandinskyVaaPriorPipeline
__lowerCamelCase : Union[str, Any] =['prompt']
__lowerCamelCase : Any =['prompt', 'negative_prompt']
__lowerCamelCase : List[str] =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase : List[Any] =False
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return 100
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowercase )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
__a = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
__a = PriorTransformer(**__lowercase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__a = CLIPVisionModelWithProjection(__lowercase )
return model
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.dummy_prior
__a = self.dummy_image_encoder
__a = self.dummy_text_encoder
__a = self.dummy_tokenizer
__a = self.dummy_image_processor
__a = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowercase , clip_sample_range=10.0 , )
__a = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Any=0 ):
'''simple docstring'''
if str(__lowercase ).startswith("""mps""" ):
__a = torch.manual_seed(__lowercase )
else:
__a = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__a = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__lowercase )
__a = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__a = pipe(**self.get_dummy_inputs(__lowercase ) )
__a = output.image_embeds
__a = pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
__a = image[0, -10:]
__a = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__a = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = torch_device == """cpu"""
__a = True
__a = False
self._test_inference_batch_single_identical(
test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
@skip_mps
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = torch_device == """cpu"""
__a = False
self._test_attention_slicing_forward_pass(
test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
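# A hedged usage sketch of the prior pipeline under test (the repo id is
# illustrative of the published checkpoint, not pinned by this file):
#
#     pipe = KandinskyVaaPriorPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-prior"
#     )
#     image_embeds = pipe("a portrait of a horse", num_inference_steps=25).image_embeds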
| 302
| 1
|
cache = {}
def _calculate( days : int , absent : int , late : int ):
    """simple docstring"""
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution( days : int = 30 ):
    """simple docstring"""
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
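    # Sanity check from the Project Euler 191 statement: over a 4-day period
    # there are exactly 43 prize strings, i.e. solution(4) == 43.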
| 302
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = Dict[str, Any]
lowerCamelCase__ = List[Prediction]
@add_end_docstrings(lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ):
'''simple docstring'''
super().__init__(*__lowercase , **__lowercase )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def UpperCamelCase_ ( self : Optional[int] , **__lowercase : List[str] ):
'''simple docstring'''
__a = {}
if "threshold" in kwargs:
__a = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self : List[Any] , *__lowercase : Any , **__lowercase : Tuple ):
'''simple docstring'''
return super().__call__(*__lowercase , **__lowercase )
def UpperCamelCase_ ( self : str , __lowercase : Tuple ):
'''simple docstring'''
__a = load_image(__lowercase )
__a = torch.IntTensor([[image.height, image.width]] )
__a = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
__a = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
__a = target_size
return inputs
def UpperCamelCase_ ( self : Dict , __lowercase : List[str] ):
'''simple docstring'''
__a = model_inputs.pop("""target_size""" )
__a = self.model(**__lowercase )
__a = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
__a = model_inputs["""bbox"""]
return model_outputs
def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any]=0.9 ):
'''simple docstring'''
__a = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__a , __a = target_size[0].tolist()
            def unnormalize(bbox : Optional[Any] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
__a , __a = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__a = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__a = [unnormalize(__lowercase ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
__a = ["""score""", """label""", """box"""]
__a = [dict(zip(__lowercase , __lowercase ) ) for vals in zip(scores.tolist() , __lowercase , __lowercase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__a = self.image_processor.post_process_object_detection(__lowercase , __lowercase , __lowercase )
__a = raw_annotations[0]
__a = raw_annotation["""scores"""]
__a = raw_annotation["""labels"""]
__a = raw_annotation["""boxes"""]
__a = scores.tolist()
__a = [self.model.config.idalabel[label.item()] for label in labels]
__a = [self._get_bounding_box(__lowercase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__a = ["""score""", """label""", """box"""]
__a = [
dict(zip(__lowercase , __lowercase ) )
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
]
return annotation
def UpperCamelCase_ ( self : Optional[int] , __lowercase : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
__a , __a , __a , __a = box.int().tolist()
__a = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
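# A hedged usage sketch of this pipeline via the public API (model id is
# illustrative; the image URL is the usual COCO example from the docs):
#
#     from transformers import pipeline
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]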
| 302
| 1
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple =KandinskyVaaPriorPipeline
__lowerCamelCase : Union[str, Any] =['prompt']
__lowerCamelCase : Any =['prompt', 'negative_prompt']
__lowerCamelCase : List[str] =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase : List[Any] =False
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return 100
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowercase )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
__a = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
__a = PriorTransformer(**__lowercase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__a = CLIPVisionModelWithProjection(__lowercase )
return model
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.dummy_prior
__a = self.dummy_image_encoder
__a = self.dummy_text_encoder
__a = self.dummy_tokenizer
__a = self.dummy_image_processor
__a = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowercase , clip_sample_range=10.0 , )
__a = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Any=0 ):
'''simple docstring'''
if str(__lowercase ).startswith("""mps""" ):
__a = torch.manual_seed(__lowercase )
else:
__a = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__a = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__lowercase )
__a = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__a = pipe(**self.get_dummy_inputs(__lowercase ) )
__a = output.image_embeds
__a = pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
__a = image[0, -10:]
__a = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__a = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = torch_device == """cpu"""
__a = True
__a = False
self._test_inference_batch_single_identical(
test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
@skip_mps
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = torch_device == """cpu"""
__a = False
self._test_attention_slicing_forward_pass(
test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
| 302
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 302
| 1
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ):
"""simple docstring"""
__a = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__a = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__a = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__a = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__a = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__a = 16
elif accelerator.mixed_precision != "no":
__a = 8
else:
__a = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , )
# Instantiate dataloaders.
__a = DataLoader(
tokenized_datasets["""train"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
__a = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCamelCase__ = mocked_dataloaders # noqa: F811
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , _SCREAMING_SNAKE_CASE ) == "1":
__a = 2
# Initialize accelerator
__a = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a = config["""lr"""]
__a = int(config["""num_epochs"""] )
__a = int(config["""seed"""] )
__a = int(config["""batch_size"""] )
__a = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_SCREAMING_SNAKE_CASE )
def inner_training_loop(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__a = model.to(accelerator.device )
# Instantiate optimizer
__a = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
__a , __a = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate scheduler
__a = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__a , __a , __a , __a , __a = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__a = model(**_SCREAMING_SNAKE_CASE )
__a = outputs.loss
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__a = model(**_SCREAMING_SNAKE_CASE )
__a = outputs.logits.argmax(dim=-1 )
__a , __a = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
__a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , _SCREAMING_SNAKE_CASE )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
__a = parser.parse_args()
__a = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
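# A minimal sketch of the decorator's behavior (assumptions: `find_executable_batch_size`
# retries the wrapped function with the batch size halved whenever it catches a CUDA
# out-of-memory failure; `toy_loop` and the OOM threshold below are hypothetical):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def toy_loop(batch_size):
#         if batch_size > 32:
#             raise RuntimeError("CUDA out of memory.")  # simulated OOM
#         return batch_size
#
#     toy_loop()  # called with no arguments; tries 128 -> 64 -> 32 and returns 32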
| 302
|
import random
def _partition( data : list , pivot ):
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
return less, equal, greater
def quick_select( items : list , index : int ):
    """simple docstring"""
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    smaller, equal, larger = _partition(items , pivot )
    count = len(equal )
    m = len(smaller )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
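# A short usage sketch: quick_select returns the element that would appear at
# position `index` (0-based) in sorted order, or None for an out-of-range index.
if __name__ == "__main__":
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 5))   # 54, the 6th smallest
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 10))  # None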
| 302
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : str ='vit'
def __init__( self : Optional[Any] , __lowercase : List[Any]=768 , __lowercase : Optional[int]=12 , __lowercase : Dict=12 , __lowercase : Tuple=3072 , __lowercase : Dict="gelu" , __lowercase : List[Any]=0.0 , __lowercase : Any=0.0 , __lowercase : Optional[int]=0.02 , __lowercase : Optional[Any]=1E-12 , __lowercase : List[str]=224 , __lowercase : Any=16 , __lowercase : Optional[int]=3 , __lowercase : Optional[Any]=True , __lowercase : int=16 , **__lowercase : Any , ):
'''simple docstring'''
super().__init__(**__lowercase )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = encoder_stride
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =version.parse('1.11' )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return 1E-4
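# A hedged usage sketch of the configuration above (public transformers names):
#
#     from transformers import ViTConfig, ViTModel
#     config = ViTConfig(image_size=224, patch_size=16, num_channels=3)
#     model = ViTModel(config)  # randomly initialized ViT with this geometry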
| 302
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Optional[int] , **__lowercase : Dict ):
'''simple docstring'''
super().__init__(**__lowercase )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self : str , __lowercase : Union[np.ndarray, bytes, str] , **__lowercase : int ):
'''simple docstring'''
return super().__call__(__lowercase , **__lowercase )
def UpperCamelCase_ ( self : List[Any] , **__lowercase : Union[str, Any] ):
'''simple docstring'''
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__a = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
    def UpperCamelCase_ ( self : int , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        '''simple docstring'''
        if isinstance(audio , str ):
            if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , """rb""" ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError("""We expect a numpy ndarray as input""" )
        if len(audio.shape ) != 1:
            raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
        inputs["""candidate_labels"""] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["""text_inputs"""] = [text_inputs]
        return inputs
def UpperCamelCase_ ( self : Any , __lowercase : Any ):
'''simple docstring'''
__a = model_inputs.pop("""candidate_labels""" )
__a = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , __lowercase ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**__lowercase , **__lowercase )
__a = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Dict ):
'''simple docstring'''
__a = model_outputs.pop("""candidate_labels""" )
__a = model_outputs["""logits"""][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
__a = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(__lowercase , __lowercase ) , key=lambda x : -x[0] )
]
return result
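# A hedged usage sketch of this pipeline (model id and file name are illustrative):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#     # -> [{"score": ..., "label": "Sound of a dog"}, ...]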
| 302
| 1
|
def min_path_sum( grid : list ):
    """simple docstring"""
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""" )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row( current_row : list , row_above : list ):
    """simple docstring"""
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
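# A short worked example: for the classic 3x3 grid below, the cheapest
# top-left-to-bottom-right path is 1 -> 3 -> 1 -> 1 -> 1, with total cost 7:
#
#     min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7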
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Dict =['pixel_values']
def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Dict , ):
'''simple docstring'''
super().__init__(**__lowercase )
__a = size if size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase )
__a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase , default_to_square=__lowercase , param_name="""crop_size""" )
__a = do_resize
__a = do_rescale
__a = do_normalize
__a = do_center_crop
__a = crop_size
__a = size
__a = resample
__a = rescale_factor
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "shortest_edge" in size:
__a = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__a = (size["""height"""], size["""width"""])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : str , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str ):
'''simple docstring'''
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ):
'''simple docstring'''
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Tuple , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : int = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = do_resize if do_resize is not None else self.do_resize
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(__lowercase , param_name="""crop_size""" , default_to_square=__lowercase )
__a = resample if resample is not None else self.resample
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(__lowercase )
if not is_batched(__lowercase ):
__a = [images]
if not valid_images(__lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__a = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
__a = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
__a = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
__a = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
__a = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__a = {"""pixel_values""": images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
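# A hedged usage sketch (the processor class name is obfuscated above, so
# `image_processor` stands for an instance of it, built with the defaults):
#
#     import numpy as np
#     image = np.zeros((512, 512, 3), dtype=np.uint8)  # dummy RGB image
#     batch = image_processor(image, return_tensors="pt")
#     batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224]) with the defaults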
| 302
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Dict ='roberta'
def __init__( self : str , __lowercase : int=50265 , __lowercase : Optional[Any]=768 , __lowercase : Optional[Any]=12 , __lowercase : Union[str, Any]=12 , __lowercase : List[Any]=3072 , __lowercase : Optional[Any]="gelu" , __lowercase : Tuple=0.1 , __lowercase : int=0.1 , __lowercase : Dict=512 , __lowercase : List[Any]=2 , __lowercase : List[str]=0.02 , __lowercase : Union[str, Any]=1E-12 , __lowercase : Any=1 , __lowercase : Optional[Any]=0 , __lowercase : Union[str, Any]=2 , __lowercase : Dict="absolute" , __lowercase : Any=True , __lowercase : Union[str, Any]=None , **__lowercase : Tuple , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = use_cache
__a = classifier_dropout
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
__a = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__a = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
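# A hedged usage sketch of the ONNX config defined last above (instantiating an
# OnnxConfig subclass with a model config is the standard transformers pattern):
#
#     from transformers import RobertaConfig
#     onnx_config = SCREAMING_SNAKE_CASE(RobertaConfig())
#     onnx_config.inputs  # OrderedDict([('input_ids', {...}), ('attention_mask', {...})])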
| 302
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = """hf-internal-testing/tiny-random-t5"""
__a = AutoTokenizer.from_pretrained(__lowercase )
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
__a = tokenizer("""This is me""" , return_tensors="""pt""" )
__a = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__a = model.generate(**__lowercase )
__a = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__a = model_reloaded.generate(**__lowercase )
self.assertTrue(torch.allclose(__lowercase , __lowercase ) )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = """hf-internal-testing/tiny-random-t5"""
__a = AutoModelForSeqaSeqLM.from_pretrained(__lowercase )
__a = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowercase ):
model.save_pretrained(__lowercase )
__a = model.reverse_bettertransformer()
model.save_pretrained(__lowercase )
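# A hedged usage sketch of the round trip exercised above (the auto class appears
# under its obfuscated name AutoModelForSeqaSeqLM in this file; the save path is
# illustrative):
#
#     model = AutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#     model = model.to_bettertransformer()       # swap in fused attention kernels
#     model = model.reverse_bettertransformer()  # restore the vanilla modules
#     model.save_pretrained("my-t5-vanilla")     # saving is allowed again now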
| 302
| 1
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset( dataset : Dataset , expected_features : dict ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
__a = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a = text_path
elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a = [text_path]
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
__a = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _check_text_datasetdict( dataset_dict : DatasetDict , expected_features : dict , splits=("train",) ):
    """simple docstring"""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader({"""train""": text_path} , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
__a = tmp_path / """cache"""
    # the "text" loader always produces a single string column named "text"
__a = {"""text""": """string"""}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader({"""train""": text_path} , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if split:
__a = {split: text_path}
else:
__a = """train"""
__a = {"""train""": text_path, """test""": text_path}
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
__a = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
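# A minimal sketch of the public API these fixtures exercise (assuming the
# `datasets` library's `load_dataset` entry point, which wraps the
# TextDatasetReader used above; names below are illustrative):
#
#     from datasets import load_dataset
#     ds = load_dataset("text", data_files={"train": "my_file.txt"}, split="train")
#     assert ds.column_names == ["text"]  # one "text" column, one row per line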
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCamelCase__ = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] ='albert'
def __init__( self : Optional[Any] , __lowercase : Union[str, Any]=30000 , __lowercase : List[str]=128 , __lowercase : Optional[Any]=4096 , __lowercase : Dict=12 , __lowercase : Any=1 , __lowercase : Optional[Any]=64 , __lowercase : Any=16384 , __lowercase : Any=1 , __lowercase : Union[str, Any]="gelu_new" , __lowercase : List[str]=0 , __lowercase : int=0 , __lowercase : Dict=512 , __lowercase : str=2 , __lowercase : List[str]=0.02 , __lowercase : Union[str, Any]=1E-12 , __lowercase : int=0.1 , __lowercase : Any="absolute" , __lowercase : Optional[int]=0 , __lowercase : Dict=2 , __lowercase : Optional[Any]=3 , **__lowercase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
__a = vocab_size
__a = embedding_size
__a = hidden_size
__a = num_hidden_layers
__a = num_hidden_groups
__a = num_attention_heads
__a = inner_group_num
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = classifier_dropout_prob
__a = position_embedding_type
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__a = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__a = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
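# Sketch of what the `inputs` mapping above resolves to for a default task
# (assuming this OnnxConfig subclass is wired to AlbertConfig, as upstream's
# `AlbertOnnxConfig` is; shown for illustration only):
#
#     {"input_ids": {0: "batch", 1: "sequence"},
#      "attention_mask": {0: "batch", 1: "sequence"},
#      "token_type_ids": {0: "batch", 1: "sequence"}}
#
# For the "multiple-choice" task a third dynamic axis, "choice", is inserted
# at position 1, since inputs then have shape (batch, num_choices, seq_len).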
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pix2struct"""] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
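# The block above implements lazy imports: heavy submodules are only imported
# when one of their attributes is first accessed. A stripped-down sketch of
# the idea (illustrative; not the actual `_LazyModule` implementation):
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module("." + submodule, self.__name__)
#                     return getattr(module, attr)
#             raise AttributeError(attr)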
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blip"""] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_blip"""] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCamelCase__ = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCamelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Union[str, Any] ='maskformer'
__lowerCamelCase : Tuple ={'hidden_size': 'mask_feature_size'}
__lowerCamelCase : Union[str, Any] =['resnet', 'swin']
__lowerCamelCase : Dict =['detr']
def __init__( self : List[str] , __lowercase : int = 256 , __lowercase : int = 256 , __lowercase : float = 0.1 , __lowercase : bool = False , __lowercase : Optional[Dict] = None , __lowercase : Optional[Dict] = None , __lowercase : float = 0.02 , __lowercase : float = 1.0 , __lowercase : float = 1.0 , __lowercase : float = 1.0 , __lowercase : float = 20.0 , __lowercase : Optional[bool] = None , **__lowercase : Tuple , ):
'''simple docstring'''
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__a = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__lowercase , __lowercase ):
__a = backbone_config.pop("""model_type""" )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(__lowercase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__a = DetrConfig()
else:
# verify that the decoder is supported
__a = (
decoder_config.pop("""model_type""" ) if isinstance(__lowercase , __lowercase ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported )}" )
if isinstance(__lowercase , __lowercase ):
__a = CONFIG_MAPPING[decoder_type]
__a = config_class.from_dict(__lowercase )
__a = backbone_config
__a = decoder_config
# main feature dimension for the model
__a = fpn_feature_size
__a = mask_feature_size
# initializer
__a = init_std
__a = init_xavier_std
# Hungarian matcher && loss
__a = cross_entropy_weight
__a = dice_weight
__a = mask_weight
__a = use_auxiliary_loss
__a = no_object_weight
__a = output_auxiliary_logits
__a = self.decoder_config.encoder_attention_heads
__a = self.decoder_config.num_hidden_layers
super().__init__(**__lowercase )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , __lowercase : PretrainedConfig , __lowercase : PretrainedConfig , **__lowercase : int ):
'''simple docstring'''
return cls(
backbone_config=__lowercase , decoder_config=__lowercase , **__lowercase , )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.decoder_config.to_dict()
__a = self.__class__.model_type
return output
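# Usage sketch for the classmethod above (upstream it is exposed as
# `MaskFormerConfig.from_backbone_and_decoder_configs`; the config class name
# here is illustrative):
#
#     backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#     decoder = DetrConfig()
#     config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone, decoder)
#     assert config.backbone_config.model_type == "swin"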
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        # Guard with `is not None` so a node holding 0 is not mistaken
        # for an empty node.
        if self.val is not None:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res):
    """In-order traversal: left subtree, node, right subtree."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort a list by inserting into a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
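# Doctest-style sketch of the helpers above (illustrative). Equal values hit
# the final `else` branch of `insert`, so duplicates are collapsed, and the
# `is not None` guard keeps a root value of 0 from being overwritten:
#
#     >>> tree_sort([10, 1, 3, 2, 9, 14, 13])
#     [1, 2, 3, 9, 10, 13, 14]
#     >>> tree_sort([0, -5, 3])
#     [-5, 0, 3]
#     >>> tree_sort([3, 1, 3, 2])   # the duplicate 3 appears only once
#     [1, 2, 3]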
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mra"""] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(__a , """width_multiplier""" ) )
class SCREAMING_SNAKE_CASE :
def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict=13 , __lowercase : int=64 , __lowercase : Tuple=2 , __lowercase : Tuple=3 , __lowercase : Tuple="swish" , __lowercase : List[Any]=3 , __lowercase : List[str]=32 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[int]=True , __lowercase : Dict=True , __lowercase : Tuple=10 , __lowercase : str=None , __lowercase : Optional[Any]=0.25 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple ):
'''simple docstring'''
__a = MobileViTVaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : Any , __lowercase : int , __lowercase : List[str] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[Any] =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCamelCase : Any =(
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Dict =False
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : int =False
__lowerCamelCase : Any =False
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__lowercase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(__lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[str] ):
__a = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(__lowercase ) , __lowercase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(__lowercase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__lowercase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__a = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = model.to(__lowercase )
__a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __lowercase )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=__lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = model.to(__lowercase )
__a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __lowercase )
__a = image_processor.post_process_semantic_segmentation(outputs=__lowercase )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __lowercase )
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def convert_state_dict(state_dict):
    """Rename RWKV checkpoint keys to the Hugging Face naming scheme."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("""emb."""):
            name = name.replace("""emb.""", """embeddings.""")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("""blocks.0.ln0"""):
            name = name.replace("""blocks.0.ln0""", """blocks.0.pre_ln""")
        # att -> attention
        name = re.sub(r"""blocks\.(\d+)\.att""", r"""blocks.\1.attention""", name)
        # ffn -> feed_forward
        name = re.sub(r"""blocks\.(\d+)\.ffn""", r"""blocks.\1.feed_forward""", name)
        # time_mix_k -> time_mix_key
        if name.endswith(""".time_mix_k"""):
            name = name.replace(""".time_mix_k""", """.time_mix_key""")
        # time_mix_v -> time_mix_value
        if name.endswith(""".time_mix_v"""):
            name = name.replace(""".time_mix_v""", """.time_mix_value""")
        # time_mix_r -> time_mix_receptance
        if name.endswith(""".time_mix_r"""):
            name = name.replace(""".time_mix_r""", """.time_mix_receptance""")
        if name != "head.weight":
            name = """rwkv.""" + name
        state_dict[name] = weight
    return state_dict
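# Quick sanity sketch of the renaming rules above (illustrative; real
# checkpoints map names to torch tensors, the ints here are stand-ins):
#
#     >>> sd = {"emb.weight": 0, "blocks.0.ln0.weight": 1,
#     ...       "blocks.3.att.time_mix_k": 2, "head.weight": 3}
#     >>> sorted(convert_state_dict(sd))
#     ['head.weight', 'rwkv.blocks.0.pre_ln.weight',
#      'rwkv.blocks.3.attention.time_mix_key', 'rwkv.embeddings.weight']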
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. Build and save the tokenizer
    if tokenizer_file is None:
        print("""No `--tokenizer_file` provided, we will use the default tokenizer.""")
        vocab_size = 5_0277
        tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download the checkpoint, then convert its state dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="""cpu""")
    state_dict = convert_state_dict(state_dict)
    # 4. Split into shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, """w""", encoding="""utf-8""") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + """\n"""
            f.write(content)
    # 5. Clean up shards (the files PyTorch saves otherwise take as much space as the whole state_dict)
    print(
        """Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.""")
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("""Please provide a `model_name` to push the model to the Hub.""")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="""2GB""")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
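# Example invocation (the script filename and the repo/checkpoint names are
# placeholders; substitute the RWKV checkpoint you actually want to convert):
#
#     python convert_rwkv_checkpoint_to_hf.py \
#         --repo_id BlinkDL/rwkv-4-pile-169m \
#         --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#         --output_dir ./rwkv-169m-hf \
#         --size 169M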
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Any ='vivit'
def __init__( self : Optional[int] , __lowercase : Any=224 , __lowercase : List[Any]=32 , __lowercase : Dict=[2, 16, 16] , __lowercase : Union[str, Any]=3 , __lowercase : List[str]=768 , __lowercase : Optional[int]=12 , __lowercase : Dict=12 , __lowercase : Optional[Any]=3072 , __lowercase : Dict="gelu_fast" , __lowercase : Optional[int]=0.0 , __lowercase : Any=0.0 , __lowercase : int=0.02 , __lowercase : Any=1E-06 , __lowercase : int=True , **__lowercase : Tuple , ):
'''simple docstring'''
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = num_frames
__a = tubelet_size
__a = num_channels
__a = qkv_bias
super().__init__(**__lowercase )
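# Worked example of the default geometry (illustrative; upstream this class is
# `VivitConfig`): with image_size=224, num_frames=32 and
# tubelet_size=[2, 16, 16], a video is cut into
# (32 / 2) * (224 / 16) * (224 / 16) = 16 * 14 * 14 = 3136 tubelet tokens,
# each embedded into hidden_size=768 dimensions before the transformer layers.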
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        # Keep only characters from the key alphabet, then pad with the last
        # character until the length is a multiple of break_key.
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
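# Doctest-style round trip (illustrative). The key matrix must have a
# determinant coprime with 36 = len(key_string); [[2, 5], [1, 6]] has
# determinant 7, which qualifies:
#
#     >>> hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     >>> msg = hc.process_text("HELLO")   # padded to 'HELLOO' (break_key = 2)
#     >>> hc.decrypt(hc.encrypt(msg)) == msg
#     True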
def main() -> None:
    n = int(input("""Enter the order of the encryption key: """))
    hill_matrix = []
    print("""Enter each row of the encryption key with space separated integers""")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("""Would you like to encrypt or decrypt some text? (1 or 2)""")
    option = input("""\n1. Encrypt\n2. Decrypt\n""")
    if option == "1":
        text_e = input("""What text would you like to encrypt?: """)
        print("""Your encrypted text is:""")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("""What text would you like to decrypt?: """)
        print("""Your decrypted text is:""")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
__a = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : str =StableDiffusionLatentUpscalePipeline
__lowerCamelCase : Optional[int] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
__lowerCamelCase : Optional[int] =PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
__lowerCamelCase : List[Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowerCamelCase : List[str] =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowerCamelCase : Union[str, Any] =frozenset([] )
__lowerCamelCase : Union[str, Any] =True
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = 1
__a = 4
__a = (16, 16)
__a = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowercase )
return image
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
__a = UNetaDConditionModel(
act_fn="""gelu""" , attention_head_dim=8 , norm_num_groups=__lowercase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"""KDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
) , in_channels=8 , mid_block_type=__lowercase , only_cross_attention=__lowercase , out_channels=5 , resnet_time_scale_shift="""scale_shift""" , time_embedding_type="""fourier""" , timestep_post_act="""gelu""" , up_block_types=("""KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KUpBlock2D""") , )
__a = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
__a = EulerDiscreteScheduler(prediction_type="""sample""" )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""quick_gelu""" , projection_dim=512 , )
__a = CLIPTextModel(__lowercase )
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__a = {
"""unet""": model.eval(),
"""vae""": vae.eval(),
"""scheduler""": scheduler,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[int] , __lowercase : Dict=0 ):
'''simple docstring'''
if str(__lowercase ).startswith("""mps""" ):
__a = torch.manual_seed(__lowercase )
else:
__a = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__a = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": self.dummy_image.cpu(),
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__a = self.get_dummy_inputs(__lowercase )
__a = pipe(**__lowercase ).images
__a = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
__a = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
__a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowercase , 1E-3 )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = [
"""DDIMScheduler""",
"""DDPMScheduler""",
"""PNDMScheduler""",
"""HeunDiscreteScheduler""",
"""EulerAncestralDiscreteScheduler""",
"""KDPM2DiscreteScheduler""",
"""KDPM2AncestralDiscreteScheduler""",
"""DPMSolverSDEScheduler""",
]
__a = self.get_dummy_components()
__a = self.pipeline_class(**__lowercase )
# make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__a = self.get_dummy_inputs(__lowercase )
__a = 2
__a = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # these schedulers are not supported by this pipeline, skip them
continue
__a = getattr(__lowercase , scheduler_enum.name )
__a = scheduler_cls.from_config(pipe.scheduler.config )
__a = pipe(**__lowercase )[0]
outputs.append(__lowercase )
assert check_same_shape(__lowercase )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = torch.manual_seed(33 )
__a = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
__a = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
__a = """a photo of an astronaut high resolution, unreal engine, ultra realistic"""
__a = pipe(__lowercase , generator=__lowercase , output_type="""latent""" ).images
__a = upscaler(
prompt=__lowercase , image=__lowercase , num_inference_steps=20 , guidance_scale=0 , generator=__lowercase , output_type="""np""" , ).images[0]
__a = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy""" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = torch.manual_seed(33 )
__a = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
__a = """the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"""
__a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png""" )
__a = upscaler(
prompt=__lowercase , image=__lowercase , num_inference_steps=20 , guidance_scale=0 , generator=__lowercase , output_type="""np""" , ).images[0]
__a = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy""" )
assert np.abs((expected_image - image).max() ) < 5E-2
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] ='autoformer'
__lowerCamelCase : str ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : List[Any] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase : bool = True , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 100 , __lowercase : float = 0.02 , __lowercase : bool = True , __lowercase : List[Any]=True , __lowercase : int = 10 , __lowercase : int = 25 , __lowercase : int = 3 , **__lowercase : Optional[int] , ):
'''simple docstring'''
# time series specific configuration
__a = prediction_length
__a = context_length if context_length is not None else prediction_length
__a = distribution_output
__a = loss
__a = input_size
__a = num_time_features
__a = lags_sequence
__a = scaling
__a = num_dynamic_real_features
__a = num_static_real_features
__a = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
__a = cardinality
else:
__a = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
__a = embedding_dimension
else:
__a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__a = num_parallel_samples
# Transformer architecture configuration
__a = input_size * len(self.lags_sequence ) + self._number_of_features
__a = d_model
__a = encoder_attention_heads
__a = decoder_attention_heads
__a = encoder_ffn_dim
__a = decoder_ffn_dim
__a = encoder_layers
__a = decoder_layers
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = activation_function
__a = init_std
__a = use_cache
# Autoformer
__a = label_length
__a = moving_average
__a = autocorrelation_factor
super().__init__(is_encoder_decoder=__lowercase , **__lowercase )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
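# Worked example (illustrative): with the defaults above, cardinality = [0]
# so embedding_dimension = [min(50, 1 // 2)] = [0], and
# num_dynamic_real_features = num_time_features = num_static_real_features = 0,
# giving _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2. The encoder input
# dimension then becomes input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.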
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : int , __lowercase : Optional[Any] , __lowercase : int=13 , __lowercase : int=32 , __lowercase : str=3 , __lowercase : List[str]=4 , __lowercase : Dict=[10, 20, 30, 40] , __lowercase : str=[2, 2, 3, 2] , __lowercase : str=True , __lowercase : Union[str, Any]=True , __lowercase : Optional[int]=37 , __lowercase : Dict="gelu" , __lowercase : Dict=10 , __lowercase : Any=0.02 , __lowercase : Tuple=["stage2", "stage3", "stage4"] , __lowercase : int=[2, 3, 4] , __lowercase : List[str]=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = num_channels
__a = num_stages
__a = hidden_sizes
__a = depths
__a = is_training
__a = use_labels
__a = intermediate_size
__a = hidden_act
__a = num_labels
__a = initializer_range
__a = out_features
__a = out_indices
__a = scope
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowercase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCamelCase_ ( self : Any , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : Tuple ):
'''simple docstring'''
__a = ConvNextVaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase_ ( self : Any , __lowercase : Dict , __lowercase : Dict , __lowercase : List[Any] ):
'''simple docstring'''
__a = ConvNextVaForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : int , __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : Tuple ):
'''simple docstring'''
__a = ConvNextVaBackbone(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__a = None
__a = ConvNextVaBackbone(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[Any] =(
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__lowerCamelCase : List[str] =(
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase : int =False
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : List[Any] =False
__lowerCamelCase : Union[str, Any] =False
__lowerCamelCase : Optional[Any] =False
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = ConvNextVaModelTester(self )
        __a = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__a , __a = self.model_tester.prepare_config_and_inputs_with_labels()
__a = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
continue
__a = model_class(__lowercase )
model.to(__lowercase )
model.train()
__a = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__a = model(**__lowercase ).loss
loss.backward()
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__a , __a = self.model_tester.prepare_config_and_inputs_with_labels()
__a = False
__a = True
if (
model_class.__name__
in [*get_values(__lowercase ), *get_values(__lowercase )]
or not model_class.supports_gradient_checkpointing
):
continue
__a = model_class(__lowercase )
model.to(__lowercase )
model.gradient_checkpointing_enable()
model.train()
__a = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__a = model(**__lowercase ).loss
loss.backward()
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__lowercase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
def check_hidden_states_output(__lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : Optional[Any] ):
__a = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a = self.model_tester.num_stages
self.assertEqual(len(__lowercase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ConvNextVaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(__lowercase )
__a = self.default_image_processor
__a = prepare_img()
__a = preprocessor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__a = torch.tensor([0.9996, 0.1966, -0.4386] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
| 302
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_electra"""] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_electra"""] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_electra"""] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
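# A minimal sketch (illustrative, PEP 562 module-level __getattr__) of the
# lazy-import idea that _LazyModule implements above: heavy backends are only
# imported when one of their symbols is first accessed, so importing the
# package itself stays cheap. This is not the actual _LazyModule code.
#
#   import importlib
#
#   _import_structure = {"configuration_electra": ["ElectraConfig"]}
#
#   def __getattr__(name):
#       for module_name, symbols in _import_structure.items():
#           if name in symbols:
#               module = importlib.import_module("." + module_name, __name__)
#               return getattr(module, name)
#       raise AttributeError(name)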
| 302
| 1
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir):
    """A second lock on the same path times out while the first is held."""
    lock_a = FileLock(str(tmpdir / "foo.lock"))
    lock_b = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_b.acquire(timeout)
    assert time.time() - _start > timeout
def test_long_filelock_path(tmpdir):
    """Overlong lock filenames are truncated to at most 255 characters."""
    filename = "a" * 1000 + ".lock"
    lock_a = FileLock(str(tmpdir / filename))
    assert lock_a._lock_file.endswith(".lock")
    assert not lock_a._lock_file.endswith(filename)
    assert len(os.path.basename(lock_a._lock_file)) <= 255
    lock_b = FileLock(tmpdir / filename)
    with lock_a.acquire():
        with pytest.raises(Timeout):
            lock_b.acquire(0)
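# A minimal usage sketch (illustrative; "demo.lock" is a made-up path): a
# second FileLock on the same path blocks until the holder releases, and a
# short timeout raises Timeout instead of waiting forever.
#
#   lock = FileLock("demo.lock")
#   with lock.acquire(timeout=1):
#       ...  # critical section; concurrent acquirers wait or time out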
| 302
|
from __future__ import annotations
lowerCamelCase__ = """#"""
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ):
'''simple docstring'''
__a = {}
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in text:
if char not in trie:
__a = {}
__a = trie[char]
__a = True
def UpperCamelCase_ ( self : Tuple , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in prefix:
if char in trie:
__a = trie[char]
else:
return []
return self._elements(__lowercase )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : dict ):
'''simple docstring'''
__a = []
for c, v in d.items():
__a = [""" """] if c == END else [(c + s) for s in self._elements(__lowercase )]
result.extend(__lowercase )
return tuple(__lowercase )
lowerCamelCase__ = Trie()
lowerCamelCase__ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = trie.find_word(_SCREAMING_SNAKE_CASE )
return tuple(string + word for word in suffixes )
def lowerCAmelCase__ ( ):
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
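# For intuition, the nested-dict layout insert_word builds for "dog" and
# "deer" (END == "#" marks a complete word):
#   {"d": {"o": {"g": {"#": True}}, "e": {"e": {"r": {"#": True}}}}}
# find_word("do") walks to the "o" node, _elements yields the suffix "g ",
# and autocomplete_using_trie("do") would return ("dog ",).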
| 302
| 1
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase__ = False
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
__a = torch.manual_seed(0 )
__a = pipe.dual_guided(
prompt="""first prompt""" , image=__lowercase , text_to_image_strength=0.75 , generator=__lowercase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowercase )
__a = VersatileDiffusionPipeline.from_pretrained(__lowercase , torch_dtype=torch.floataa )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__a = generator.manual_seed(0 )
__a = pipe.dual_guided(
prompt="""first prompt""" , image=__lowercase , text_to_image_strength=0.75 , generator=__lowercase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__a = """cyberpunk 2077"""
__a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
__a = torch.manual_seed(0 )
__a = pipe.dual_guided(
prompt=__lowercase , image=__lowercase , text_to_image_strength=0.75 , generator=__lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
__a = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__a = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__a = """A painting of a squirrel eating a burger """
__a = torch.manual_seed(0 )
__a = pipe.text_to_image(
prompt=__lowercase , generator=__lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
__a = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__a = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__a = pipe.image_variation(__lowercase , generator=__lowercase , output_type="""numpy""" ).images
__a = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__a = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 302
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : torch.FloatTensor
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ):
@register_to_config
def __init__( self : Dict , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Any=77 , __lowercase : Optional[int]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
__a = num_attention_heads
__a = attention_head_dim
__a = num_attention_heads * attention_head_dim
__a = additional_embeddings
__a = time_embed_dim or inner_dim
__a = embedding_proj_dim or embedding_dim
__a = clip_embed_dim or embedding_dim
__a = Timesteps(__lowercase , __lowercase , 0 )
__a = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase )
__a = nn.Linear(__lowercase , __lowercase )
if embedding_proj_norm_type is None:
__a = None
elif embedding_proj_norm_type == "layer":
__a = nn.LayerNorm(__lowercase )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
__a = nn.Linear(__lowercase , __lowercase )
if encoder_hid_proj_type is None:
__a = None
elif encoder_hid_proj_type == "linear":
__a = nn.Linear(__lowercase , __lowercase )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
__a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) )
if added_emb_type == "prd":
__a = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) )
elif added_emb_type is None:
__a = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
__a = nn.ModuleList(
[
BasicTransformerBlock(
__lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn="""gelu""" , attention_bias=__lowercase , )
for d in range(__lowercase )
] )
if norm_in_type == "layer":
__a = nn.LayerNorm(__lowercase )
elif norm_in_type is None:
__a = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
__a = nn.LayerNorm(__lowercase )
__a = nn.Linear(__lowercase , __lowercase )
__a = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
__a = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , __lowercase , persistent=__lowercase )
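        # Illustrative (not part of the model): after triu_(1) the mask is 0 on
        # and below the diagonal and -10000.0 strictly above it, so adding it to
        # attention scores stops position i from attending to any j > i, e.g.
        # for three tokens:
        #   [[0., -10000., -10000.],
        #    [0.,      0., -10000.],
        #    [0.,      0.,      0.]]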
__a = nn.Parameter(torch.zeros(1 , __lowercase ) )
__a = nn.Parameter(torch.zeros(1 , __lowercase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = {}
def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ):
if hasattr(__lowercase , """set_processor""" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__lowercase , __lowercase , __lowercase )
return processors
def UpperCamelCase_ ( self : List[str] , __lowercase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
__a = len(self.attn_processors.keys() )
if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict ):
if hasattr(__lowercase , """set_processor""" ):
if not isinstance(__lowercase , __lowercase ):
module.set_processor(__lowercase )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , __lowercase , __lowercase )
for name, module in self.named_children():
fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.FloatTensor , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.BoolTensor] = None , __lowercase : bool = True , ):
'''simple docstring'''
__a = hidden_states.shape[0]
__a = timestep
if not torch.is_tensor(__lowercase ):
__a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0:
__a = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__a = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device )
__a = self.time_proj(__lowercase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__a = timesteps_projected.to(dtype=self.dtype )
__a = self.time_embedding(__lowercase )
if self.embedding_proj_norm is not None:
__a = self.embedding_proj_norm(__lowercase )
__a = self.embedding_proj(__lowercase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__a = self.encoder_hidden_states_proj(__lowercase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
__a = self.proj_in(__lowercase )
__a = self.positional_embedding.to(hidden_states.dtype )
__a = []
__a = 0
if encoder_hidden_states is not None:
additional_embeds.append(__lowercase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__a = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__a = hidden_states[:, None, :]
__a = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__a = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 )
additional_embeds.append(__lowercase )
__a = torch.cat(
__lowercase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__a = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__a = F.pad(
__lowercase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__a = hidden_states + positional_embeddings
if attention_mask is not None:
__a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
__a = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 )
__a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__a = self.norm_in(__lowercase )
for block in self.transformer_blocks:
__a = block(__lowercase , attention_mask=__lowercase )
__a = self.norm_out(__lowercase )
if self.prd_embedding is not None:
__a = hidden_states[:, -1]
else:
__a = hidden_states[:, additional_embeddings_len:]
__a = self.proj_to_clip_embeddings(__lowercase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : Tuple ):
'''simple docstring'''
__a = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
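        # Illustrative note: this maps normalised prior latents back to the
        # CLIP-embedding scale (x * clip_std + clip_mean), undoing the
        # normalisation applied during training.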
| 302
| 1
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_sql_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_sql_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
    with contextlib.closing(sqlite3.connect(_SCREAMING_SNAKE_CASE ) ) as con:
__a = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = os.path.join(_SCREAMING_SNAKE_CASE , """tmp.sql""" )
__a = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_SCREAMING_SNAKE_CASE ).read()
SqlDatasetWriter(_SCREAMING_SNAKE_CASE , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
__a = iter_sql_file(_SCREAMING_SNAKE_CASE )
__a = iter_sql_file(_SCREAMING_SNAKE_CASE )
for rowa, rowa in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert rowa == rowa
@require_sqlalchemy
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = os.path.join(_SCREAMING_SNAKE_CASE , """tmp.sql""" )
__a = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_SCREAMING_SNAKE_CASE ).read()
SqlDatasetWriter(_SCREAMING_SNAKE_CASE , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
__a = iter_sql_file(_SCREAMING_SNAKE_CASE )
__a = iter_sql_file(_SCREAMING_SNAKE_CASE )
for rowa, rowa in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert rowa == rowa
@require_sqlalchemy
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = os.path.join(_SCREAMING_SNAKE_CASE , """tmp.sql""" )
__a = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=_SCREAMING_SNAKE_CASE ).read()
    with pytest.raises(ValueError ):
SqlDatasetWriter(_SCREAMING_SNAKE_CASE , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 302
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    """True if every element of the iterable is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    """Find the first n consecutive integers with exactly n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4):
    """Return the first of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
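    # Illustrative check: 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43 and
    # 646 = 2 * 17 * 19 each have exactly three distinct prime factors and form
    # the first such run, so run(3) returns them; solution() with the default
    # n=4 yields 134043, the Project Euler 47 answer.
    assert run(3) == [644, 645, 646]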
| 302
| 1
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE :
__lowerCamelCase : List[Any] =LEDConfig
__lowerCamelCase : Any ={}
__lowerCamelCase : Optional[Any] ='gelu'
def __init__( self : List[Any] , __lowercase : int , __lowercase : int=13 , __lowercase : List[str]=7 , __lowercase : Union[str, Any]=True , __lowercase : Optional[Any]=False , __lowercase : Tuple=99 , __lowercase : List[Any]=32 , __lowercase : Any=2 , __lowercase : Any=4 , __lowercase : List[str]=37 , __lowercase : Any=0.1 , __lowercase : int=0.1 , __lowercase : Optional[int]=20 , __lowercase : List[str]=2 , __lowercase : Optional[int]=1 , __lowercase : Optional[int]=0 , __lowercase : List[str]=4 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = eos_token_id
__a = pad_token_id
__a = bos_token_id
__a = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__a = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__a = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
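        # Illustrative arithmetic for the padding above: with seq_length=7 and
        # attention_window=4, 7 + (4 - 7 % 4) % 4 == 8, i.e. the sequence is
        # padded up to the next multiple of the attention window.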
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__a = tf.concat([input_ids, eos_tensor] , axis=1 )
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
__a = prepare_led_inputs_dict(__lowercase , __lowercase , __lowercase )
__a = tf.concat(
[tf.zeros_like(__lowercase )[:, :-1], tf.ones_like(__lowercase )[:, -1:]] , axis=-1 , )
__a = global_attention_mask
return config, inputs_dict
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Optional[int] ):
'''simple docstring'''
__a = TFLEDModel(config=__lowercase ).get_decoder()
__a = inputs_dict["""input_ids"""]
__a = input_ids[:1, :]
__a = inputs_dict["""attention_mask"""][:1, :]
__a = 1
# first forward pass
__a = model(__lowercase , attention_mask=__lowercase , use_cache=__lowercase )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
__a = tf.concat([input_ids, next_tokens] , axis=-1 )
__a = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__a = model(__lowercase , attention_mask=__lowercase )[0]
__a = model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__a = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__a = output_from_no_past[:, -3:, random_slice_idx]
__a = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowercase , __lowercase , rtol=1E-3 )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : List[str]=None , ):
"""simple docstring"""
if attention_mask is None:
__a = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__a = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__a = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[str] =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
__lowerCamelCase : str =(TFLEDForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase : Any =(
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase : Any =True
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : Union[str, Any] =False
__lowerCamelCase : Any =False
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = TFLEDModelTester(self )
__a = ConfigTester(self , config_class=__lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = tf.zeros_like(inputs_dict["""attention_mask"""] )
__a = 2
__a = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
__a = True
__a = self.model_tester.seq_length
__a = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__lowercase : Union[str, Any] ):
__a = outputs.decoder_attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__lowercase : Tuple ):
__a = [t.numpy() for t in outputs.encoder_attentions]
__a = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__a = True
__a = False
__a = False
__a = model_class(__lowercase )
__a = model(self._prepare_for_class(__lowercase , __lowercase ) )
__a = len(__lowercase )
self.assertEqual(config.output_hidden_states , __lowercase )
check_encoder_attentions_output(__lowercase )
if self.is_encoder_decoder:
__a = model_class(__lowercase )
__a = model(self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(config.output_hidden_states , __lowercase )
check_decoder_attentions_output(__lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__a = True
__a = model_class(__lowercase )
__a = model(self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(config.output_hidden_states , __lowercase )
check_encoder_attentions_output(__lowercase )
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(__lowercase )
__a = model(self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowercase ) )
self.assertEqual(model.config.output_hidden_states , __lowercase )
check_encoder_attentions_output(__lowercase )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        # TODO: head-masking is not yet implemented
pass
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
return tf.constant(_SCREAMING_SNAKE_CASE , dtype=tf.intaa )
lowerCamelCase__ = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
__a = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__a = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__a = prepare_led_inputs_dict(model.config , __lowercase , __lowercase )
__a = model(**__lowercase )[0]
__a = (1, 1024, 768)
self.assertEqual(output.shape , __lowercase )
# change to expected output here
__a = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , __lowercase , atol=1E-3 )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
__a = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__a = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__a = prepare_led_inputs_dict(model.config , __lowercase , __lowercase )
__a = model(**__lowercase )[0]
__a = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __lowercase )
# change to expected output here
__a = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , __lowercase , atol=1E-3 , rtol=1E-3 )
| 302
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
__a = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__a = 128
elif "12-12" in model_name:
__a = 12
__a = 12
elif "14-14" in model_name:
__a = 14
__a = 14
elif "16-16" in model_name:
__a = 16
__a = 16
else:
raise ValueError("""Model not supported""" )
__a = """huggingface/label-files"""
if "speech-commands" in model_name:
__a = 35
__a = """speech-commands-v2-id2label.json"""
else:
__a = 527
__a = """audioset-id2label.json"""
__a = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
__a = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if "module.v" in name:
__a = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
__a = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
__a = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
__a = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__a = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
__a = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__a = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__a = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__a = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__a = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__a = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__a = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__a = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
__a = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
__a = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__a = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "qkv" in key:
__a = key.split(""".""" )
__a = int(key_split[3] )
__a = config.hidden_size
if "weight" in key:
__a = val[:dim, :]
__a = val[dim : dim * 2, :]
__a = val[-dim:, :]
else:
__a = val[:dim]
__a = val[dim : dim * 2]
__a = val[-dim:]
else:
__a = val
return orig_state_dict
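# Note (illustrative): timm-style checkpoints store attention as a single fused
# "qkv" matrix of shape (3 * hidden_size, hidden_size); the slices above peel
# off query ([:dim]), key ([dim : dim * 2]) and value ([-dim:]) for the
# separate HF projection layers.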
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
__a = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@torch.no_grad()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=False ):
"""simple docstring"""
__a = get_audio_spectrogram_transformer_config(_SCREAMING_SNAKE_CASE )
__a = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
__a = model_name_to_url[model_name]
__a = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
# remove some keys
remove_keys(_SCREAMING_SNAKE_CASE )
# rename some keys
__a = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load 🤗 model
__a = ASTForAudioClassification(_SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
__a = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978
__a = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526
__a = 1024 if """speech-commands""" not in model_name else 128
__a = ASTFeatureExtractor(mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
if "speech-commands" in model_name:
__a = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
__a = dataset[0]["""audio"""]["""array"""]
else:
__a = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
__a , __a = torchaudio.load(_SCREAMING_SNAKE_CASE )
__a = waveform.squeeze().numpy()
__a = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=1_6000 , return_tensors="""pt""" )
# forward pass
__a = model(**_SCREAMING_SNAKE_CASE )
__a = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__a = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__a = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__a = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__a = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__a = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__a = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__a = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__a = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"Saving feature extractor to {pytorch_dump_folder_path}" )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f"MIT/{model_name}" )
feature_extractor.push_to_hub(f"MIT/{model_name}" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase__ = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 302
| 1
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n
    def refer(self, x: T) -> None:
        """Move x to the front; evict the least recently used key when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)
    def display(self) -> None:
        """Print cache contents from most to least recently used."""
        for k in self.dq_store:
            print(k)
    def __repr__(self) -> str:
        return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 302
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =VOCAB_FILES_NAMES
__lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any =AlbertTokenizer
def __init__( self : Tuple , __lowercase : Union[str, Any]=None , __lowercase : Optional[int]=None , __lowercase : int=True , __lowercase : Dict=True , __lowercase : str=False , __lowercase : str="[CLS]" , __lowercase : List[Any]="[SEP]" , __lowercase : Any="<unk>" , __lowercase : List[Any]="[SEP]" , __lowercase : List[Any]="<pad>" , __lowercase : Optional[Any]="[CLS]" , __lowercase : List[str]="[MASK]" , **__lowercase : str , ):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__a = (
AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase )
if isinstance(__lowercase , __lowercase )
else mask_token
)
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = False if not self.vocab_file else True
def UpperCamelCase_ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : str , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Tuple , __lowercase : str , __lowercase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__a = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,)
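    # For intuition (illustrative): given a sequence pair A, B the two methods
    # above produce
    #   tokens:         [CLS] A [SEP] B [SEP]
    #   token_type_ids:   0   0   0    1   1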
| 302
| 1
|
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )
    def process(self):
        """Map every destination pixel to its nearest source pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]
    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)
    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
| 302
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Optional[int] =(IPNDMScheduler,)
__lowerCamelCase : int =(('num_inference_steps', 50),)
def UpperCamelCase_ ( self : str , **__lowercase : Dict ):
'''simple docstring'''
__a = {"""num_train_timesteps""": 1000}
config.update(**__lowercase )
return config
def UpperCamelCase_ ( self : Any , __lowercase : Tuple=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
new_scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : str , __lowercase : int=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals (must be after setting timesteps)
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowercase )
# copy over dummy past residual (must be after setting timesteps)
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : List[str] , **__lowercase : Dict ):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
__a = 10
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(__lowercase )
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
return sample
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowercase , """set_timesteps""" ):
scheduler.set_timesteps(__lowercase )
elif num_inference_steps is not None and not hasattr(__lowercase , """set_timesteps""" ):
__a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__a = dummy_past_residuals[:]
__a = scheduler.timesteps[5]
__a = scheduler.timesteps[6]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__lowercase , time_step=__lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__lowercase , time_step=__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.full_loop()
__a = torch.mean(torch.abs(__lowercase ) )
assert abs(result_mean.item() - 2540529 ) < 10
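# Hedged standalone sketch of driving the scheduler outside the test harness;
# the tensor shape and the fixed residual rule are illustrative assumptions
# standing in for a real denoising model:
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.ones((1, 3, 8, 8))
#   for t in scheduler.timesteps:
#       residual = 0.1 * sample
#       sample = scheduler.step(residual, t, sample).prev_sample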
| 302
| 1
|
import numpy
# List of input, output pairs
lowerCamelCase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowerCamelCase__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
lowerCamelCase__ = [2, 4, 1, 5]
lowerCamelCase__ = len(train_data)
lowerCamelCase__ = 0.009
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any]="train" ):
"""simple docstring"""
return calculate_hypothesis_value(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) - output(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
__a = 0
for i in range(len(parameter_vector ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any]=m ):
"""simple docstring"""
__a = 0
for i in range(_SCREAMING_SNAKE_CASE ):
if index == -1:
summation_value += _error(_SCREAMING_SNAKE_CASE )
else:
summation_value += _error(_SCREAMING_SNAKE_CASE ) * train_data[i][0][index]
return summation_value
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
__a = summation_of_cost_derivative(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) / m
return cost_derivative_value
def lowerCAmelCase__ ( ):
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__a = 0.00_0002
__a = 0
__a = 0
while True:
j += 1
__a = [0, 0, 0, 0]
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) ):
__a = get_cost_derivative(i - 1 )
__a = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE , rtol=_SCREAMING_SNAKE_CASE , ):
break
__a = temp_parameter_vector
print(("""Number of iterations:""", j) )
def lowerCAmelCase__ ( ):
"""simple docstring"""
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
print(("""Actual output value:""", output(_SCREAMING_SNAKE_CASE , """test""" )) )
print(("""Hypothesis output:""", calculate_hypothesis_value(_SCREAMING_SNAKE_CASE , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
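# For reference, the hypothesis being fit above is linear in the three features:
#   h(x) = p0 + p1*x1 + p2*x2 + p3*x3
# With the initial parameter_vector [2, 4, 1, 5] and the first training input
# (5, 2, 3): h = 2 + 4*5 + 1*2 + 5*3 = 39, so the initial error is 39 - 15 = 24.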
| 302
|
from __future__ import annotations
lowerCamelCase__ = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , __lowercase : dict[str, list[str]] , __lowercase : str ):
'''simple docstring'''
__a = graph
# mapping node to its parent in resulting breadth first tree
__a = {}
__a = source_vertex
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = {self.source_vertex}
__a = None
__a = [self.source_vertex] # first in first out queue
while queue:
__a = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(__lowercase )
__a = vertex
queue.append(__lowercase )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : str ):
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
__a = self.parent.get(__lowercase )
if target_vertex_parent is None:
__a = (
F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(__lowercase )
return self.shortest_path(__lowercase ) + F"->{target_vertex}"
if __name__ == "__main__":
lowerCamelCase__ = SCREAMING_SNAKE_CASE(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 302
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] ='altclip_text_model'
def __init__( self : Any , __lowercase : str=250002 , __lowercase : Union[str, Any]=1024 , __lowercase : int=24 , __lowercase : List[Any]=16 , __lowercase : Tuple=4096 , __lowercase : Union[str, Any]="gelu" , __lowercase : Tuple=0.1 , __lowercase : str=0.1 , __lowercase : str=514 , __lowercase : Tuple=1 , __lowercase : Dict=0.02 , __lowercase : List[str]=0.02 , __lowercase : Optional[Any]=1E-05 , __lowercase : Dict=1 , __lowercase : Optional[Any]=0 , __lowercase : Any=2 , __lowercase : Union[str, Any]="absolute" , __lowercase : Optional[Any]=True , __lowercase : Any=768 , **__lowercase : Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = initializer_factor
__a = layer_norm_eps
__a = position_embedding_type
__a = use_cache
__a = project_dim
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Dict ='altclip_vision_model'
def __init__( self : str , __lowercase : Union[str, Any]=768 , __lowercase : Tuple=3072 , __lowercase : int=512 , __lowercase : List[Any]=12 , __lowercase : List[str]=12 , __lowercase : str=3 , __lowercase : List[str]=224 , __lowercase : str=32 , __lowercase : str="quick_gelu" , __lowercase : Tuple=1E-5 , __lowercase : List[Any]=0.0 , __lowercase : Any=0.02 , __lowercase : Tuple=1.0 , **__lowercase : Optional[int] , ):
'''simple docstring'''
super().__init__(**__lowercase )
__a = hidden_size
__a = intermediate_size
__a = projection_dim
__a = num_hidden_layers
__a = num_attention_heads
__a = num_channels
__a = patch_size
__a = image_size
__a = initializer_range
__a = initializer_factor
__a = attention_dropout
__a = layer_norm_eps
__a = hidden_act
@classmethod
def UpperCamelCase_ ( cls : List[Any] , __lowercase : Union[str, os.PathLike] , **__lowercase : int ):
'''simple docstring'''
cls._set_token_in_kwargs(__lowercase )
__a , __a = cls.get_config_dict(__lowercase , **__lowercase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
__a = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__lowercase , **__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[str] ='altclip'
__lowerCamelCase : Tuple =True
def __init__( self : List[Any] , __lowercase : Union[str, Any]=None , __lowercase : Dict=None , __lowercase : List[Any]=768 , __lowercase : Optional[Any]=2.6592 , **__lowercase : Optional[Any] ):
'''simple docstring'''
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
__a = kwargs.pop("""text_config_dict""" , __lowercase )
__a = kwargs.pop("""vision_config_dict""" , __lowercase )
super().__init__(**__lowercase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
__a = {}
# This is the complete result when using `text_config_dict`.
__a = AltCLIPTextConfig(**__lowercase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
__a = (
F"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
F"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
__a = (
F"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
F"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(__lowercase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
__a = {}
# This is the complete result when using `vision_config_dict`.
__a = AltCLIPVisionConfig(**__lowercase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
__a = {
str(__lowercase ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
__a = (
F"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
F"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
__a = (
F"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
F"The value `vision_config[\"{key}\"]` will be overriden."
)
logger.warning(__lowercase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
__a = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
__a = {}
logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
__a = AltCLIPTextConfig(**__lowercase )
__a = AltCLIPVisionConfig(**__lowercase )
__a = projection_dim
__a = logit_scale_init_value
__a = 1.0
@classmethod
def UpperCamelCase_ ( cls : Dict , __lowercase : AltCLIPTextConfig , __lowercase : AltCLIPVisionConfig , **__lowercase : List[Any] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowercase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = copy.deepcopy(self.__dict__ )
__a = self.text_config.to_dict()
__a = self.vision_config.to_dict()
__a = self.__class__.model_type
return output
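# Hedged usage sketch: composing the combined config from its two sub-configs,
# mirroring the from_text_vision_configs classmethod above. The class names
# follow the internal references in this file (the local class definitions
# themselves are renamed); kwargs are omitted:
#   text_cfg = AltCLIPTextConfig()
#   vision_cfg = AltCLIPVisionConfig()
#   clip_cfg = AltCLIPConfig(text_config=text_cfg.to_dict(),
#                            vision_config=vision_cfg.to_dict())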
| 302
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple =KandinskyVaaPriorPipeline
__lowerCamelCase : Union[str, Any] =['prompt']
__lowerCamelCase : Any =['prompt', 'negative_prompt']
__lowerCamelCase : List[str] =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase : List[Any] =False
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return 100
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowercase )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
__a = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
__a = PriorTransformer(**__lowercase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__a = CLIPVisionModelWithProjection(__lowercase )
return model
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.dummy_prior
__a = self.dummy_image_encoder
__a = self.dummy_text_encoder
__a = self.dummy_tokenizer
__a = self.dummy_image_processor
__a = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowercase , clip_sample_range=10.0 , )
__a = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Any=0 ):
'''simple docstring'''
if str(__lowercase ).startswith("""mps""" ):
__a = torch.manual_seed(__lowercase )
else:
__a = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__a = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__lowercase )
__a = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__a = pipe(**self.get_dummy_inputs(__lowercase ) )
__a = output.image_embeds
__a = pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
__a = image[0, -10:]
__a = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__a = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = torch_device == """cpu"""
__a = True
__a = False
self._test_inference_batch_single_identical(
test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
@skip_mps
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = torch_device == """cpu"""
__a = False
self._test_attention_slicing_forward_pass(
test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
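# Hedged end-to-end sketch mirroring the smoke test above; the device, seed
# and step count are illustrative assumptions:
#   components = ...  # built as in get_dummy_components above
#   pipe = KandinskyVaaPriorPipeline(**components).to("cpu")
#   out = pipe(prompt="horse", generator=torch.manual_seed(0),
#              guidance_scale=4.0, num_inference_steps=2, output_type="np")
#   out.image_embeds.shape  # (1, 32) with the dummy prior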
| 302
| 1
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = False
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
lowerCamelCase__ = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
lowerCamelCase__ = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
lowerCamelCase__ = reader.read()
lowerCamelCase__ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
lowerCamelCase__ = UNetaDModel(**config)
else:
lowerCamelCase__ = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
lowerCamelCase__ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCamelCase__ = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCamelCase__ = config[key]
del config[key]
lowerCamelCase__ = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
lowerCamelCase__ = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
lowerCamelCase__ = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
lowerCamelCase__ = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
lowerCamelCase__ = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
lowerCamelCase__ = param_value
lowerCamelCase__ = True
if not has_changed:
lowerCamelCase__ = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
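# Hedged invocation sketch for the conversion script above (the script
# filename and both paths are placeholders):
#   python convert_unet_config.py --repo_path ./old-ddpm-repo --dump_path ./converted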
| 302
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = Dict[str, Any]
lowerCamelCase__ = List[Prediction]
@add_end_docstrings(lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ):
'''simple docstring'''
super().__init__(*__lowercase , **__lowercase )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def UpperCamelCase_ ( self : Optional[int] , **__lowercase : List[str] ):
'''simple docstring'''
__a = {}
if "threshold" in kwargs:
__a = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self : List[Any] , *__lowercase : Any , **__lowercase : Tuple ):
'''simple docstring'''
return super().__call__(*__lowercase , **__lowercase )
def UpperCamelCase_ ( self : str , __lowercase : Tuple ):
'''simple docstring'''
__a = load_image(__lowercase )
__a = torch.IntTensor([[image.height, image.width]] )
__a = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
__a = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
__a = target_size
return inputs
def UpperCamelCase_ ( self : Dict , __lowercase : List[str] ):
'''simple docstring'''
__a = model_inputs.pop("""target_size""" )
__a = self.model(**__lowercase )
__a = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
__a = model_inputs["""bbox"""]
return model_outputs
def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any]=0.9 ):
'''simple docstring'''
__a = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__a , __a = target_size[0].tolist()
def unnormalize(bbox : Optional[Any] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
__a , __a = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__a = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__a = [unnormalize(__lowercase ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
__a = ["""score""", """label""", """box"""]
__a = [dict(zip(__lowercase , __lowercase ) ) for vals in zip(scores.tolist() , __lowercase , __lowercase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__a = self.image_processor.post_process_object_detection(__lowercase , __lowercase , __lowercase )
__a = raw_annotations[0]
__a = raw_annotation["""scores"""]
__a = raw_annotation["""labels"""]
__a = raw_annotation["""boxes"""]
__a = scores.tolist()
__a = [self.model.config.idalabel[label.item()] for label in labels]
__a = [self._get_bounding_box(__lowercase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__a = ["""score""", """label""", """box"""]
__a = [
dict(zip(__lowercase , __lowercase ) )
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
]
return annotation
def UpperCamelCase_ ( self : Optional[int] , __lowercase : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
__a , __a , __a , __a = box.int().tolist()
__a = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
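# Hedged usage sketch through the high-level pipeline factory; the checkpoint
# name is an assumption, not mandated by this file:
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("cats.png", threshold=0.9)
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]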
| 302
| 1
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger("""transformers.models.speecht5""")
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
hf_model.apply_weight_norm()
__a = checkpoint["""input_conv.weight_g"""]
__a = checkpoint["""input_conv.weight_v"""]
__a = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
__a = checkpoint[f"upsamples.{i}.1.weight_g"]
__a = checkpoint[f"upsamples.{i}.1.weight_v"]
__a = checkpoint[f"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
__a = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
__a = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
__a = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
__a = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
__a = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
__a = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
__a = checkpoint["""output_conv.1.weight_g"""]
__a = checkpoint["""output_conv.1.weight_v"""]
__a = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : str=None , ):
"""simple docstring"""
if config_path is not None:
__a = SpeechTaHifiGanConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
__a = SpeechTaHifiGanConfig()
__a = SpeechTaHifiGan(_SCREAMING_SNAKE_CASE )
__a = torch.load(_SCREAMING_SNAKE_CASE )
load_weights(orig_checkpoint["""model"""]["""generator"""] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a = np.load(_SCREAMING_SNAKE_CASE )
__a = stats[0].reshape(-1 )
__a = stats[1].reshape(-1 )
__a = torch.from_numpy(_SCREAMING_SNAKE_CASE ).float()
__a = torch.from_numpy(_SCREAMING_SNAKE_CASE ).float()
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
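# Hedged invocation sketch (the script name and all paths are placeholders):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan --push_to_hub my-org/speecht5_hifigan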
| 302
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
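# Hedged note on the lazy-module pattern above: only the import structure is
# registered up front, so the heavy dependency is deferred until first use:
#   from transformers.models.efficientnet import EfficientNetConfig  # cheap, config only
#   from transformers.models.efficientnet import EfficientNetModel   # pulls in torch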
| 302
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 302
|
import random
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
__a , __a , __a = [], [], []
for element in data:
if element < pivot:
less.append(_SCREAMING_SNAKE_CASE )
elif element > pivot:
greater.append(_SCREAMING_SNAKE_CASE )
else:
equal.append(_SCREAMING_SNAKE_CASE )
return less, equal, greater
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0:
return None
__a = items[random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )]
__a = 0
__a , __a , __a = _partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a = len(_SCREAMING_SNAKE_CASE )
__a = len(_SCREAMING_SNAKE_CASE )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# must be in larger
else:
return quick_select(_SCREAMING_SNAKE_CASE , index - (m + count) )
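# Hedged usage sketch (the index is 0-based over the sorted order):
#   quick_select([7, 3, 9, 1, 5], 0)  # -> 1, the minimum
#   quick_select([7, 3, 9, 1, 5], 2)  # -> 5, the median
#   quick_select([7, 3, 9, 1, 5], 5)  # -> None, index out of range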
| 302
| 1
|
from __future__ import annotations
from collections.abc import Iterator
class SCREAMING_SNAKE_CASE :
def __init__( self : str , __lowercase : int ):
'''simple docstring'''
__a = value
__a = None
__a = None
class SCREAMING_SNAKE_CASE :
def __init__( self : int , __lowercase : Node ):
'''simple docstring'''
__a = tree
def UpperCamelCase_ ( self : int , __lowercase : Node | None ):
'''simple docstring'''
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : List[str] ):
'''simple docstring'''
yield self.depth_first_search(self.tree )
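# Hedged usage sketch; `Node` stands for the first class above and
# `BinaryTreeNodeSum` for the second (illustrative names for the two renamed
# classes, both defined as SCREAMING_SNAKE_CASE in this file):
#   root = Node(10); root.left = Node(5); root.right = Node(-3)
#   next(iter(BinaryTreeNodeSum(root)))  # -> 12, the sum of all node values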
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Optional[int] , **__lowercase : Dict ):
'''simple docstring'''
super().__init__(**__lowercase )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self : str , __lowercase : Union[np.ndarray, bytes, str] , **__lowercase : int ):
'''simple docstring'''
return super().__call__(__lowercase , **__lowercase )
def UpperCamelCase_ ( self : List[Any] , **__lowercase : Union[str, Any] ):
'''simple docstring'''
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__a = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCamelCase_ ( self : int , __lowercase : Dict , __lowercase : Dict=None , __lowercase : str="This is a sound of {}." ):
'''simple docstring'''
if isinstance(__lowercase , __lowercase ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(__lowercase ).content
else:
with open(__lowercase , """rb""" ) as f:
__a = f.read()
if isinstance(__lowercase , __lowercase ):
__a = ffmpeg_read(__lowercase , self.feature_extractor.sampling_rate )
if not isinstance(__lowercase , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
__a = candidate_labels
__a = [hypothesis_template.format(__lowercase ) for x in candidate_labels]
__a = self.tokenizer(__lowercase , return_tensors=self.framework , padding=__lowercase )
__a = [text_inputs]
return inputs
def UpperCamelCase_ ( self : Any , __lowercase : Any ):
'''simple docstring'''
__a = model_inputs.pop("""candidate_labels""" )
__a = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , __lowercase ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**__lowercase , **__lowercase )
__a = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : Dict ):
'''simple docstring'''
__a = model_outputs.pop("""candidate_labels""" )
__a = model_outputs["""logits"""][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
__a = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(__lowercase , __lowercase ) , key=lambda x : -x[0] )
]
return result
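# Hedged usage sketch through the high-level pipeline factory; the checkpoint
# name is an assumption commonly used in the docs:
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification",
#                         model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])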
| 302
| 1
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def _a ( a :str ) -> Dict:
a = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
a = 128
elif "12-12" in model_name:
a = 12
a = 12
elif "14-14" in model_name:
a = 14
a = 14
elif "16-16" in model_name:
a = 16
a = 16
else:
raise ValueError('''Model not supported''' )
a = '''huggingface/label-files'''
if "speech-commands" in model_name:
a = 35
a = '''speech-commands-v2-id2label.json'''
else:
a = 527
a = '''audioset-id2label.json'''
a = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
a = {int(a ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
return config
def _a ( a :Optional[Any] ) -> Union[str, Any]:
if "module.v" in name:
a = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
a = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
a = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
a = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
a = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
a = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
a = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
a = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
a = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
a = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
a = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
a = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
a = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
a = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
a = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
def _a ( a :Optional[int] , a :List[str] ) -> Any:
for key in orig_state_dict.copy().keys():
a = orig_state_dict.pop(a )
if "qkv" in key:
a = key.split('''.''' )
a = int(key_split[3] )
a = config.hidden_size
if "weight" in key:
a = val[:dim, :]
a = val[dim : dim * 2, :]
a = val[-dim:, :]
else:
a = val[:dim]
a = val[dim : dim * 2]
a = val[-dim:]
else:
a = val
return orig_state_dict
def _a ( a :Union[str, Any] ) -> Optional[Any]:
a = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(a , a )
@torch.no_grad()
def _a ( a :Union[str, Any] , a :Optional[int] , a :str=False ) -> Union[str, Any]:
a = get_audio_spectrogram_transformer_config(a )
a = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
a = model_name_to_url[model_name]
a = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' )
# remove some keys
remove_keys(a )
# rename some keys
a = convert_state_dict(a , a )
# load 🤗 model
a = ASTForAudioClassification(a )
model.eval()
model.load_state_dict(a )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
a = -4.2_677_393 if '''speech-commands''' not in model_name else -6.845_978
a = 4.5_689_974 if '''speech-commands''' not in model_name else 5.5_654_526
a = 1_024 if '''speech-commands''' not in model_name else 128
a = ASTFeatureExtractor(mean=a , std=a , max_length=a )
if "speech-commands" in model_name:
a = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
a = dataset[0]['''audio''']['''array''']
else:
a = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
a , a = torchaudio.load(a )
a = waveform.squeeze().numpy()
a = feature_extractor(a , sampling_rate=16_000 , return_tensors='''pt''' )
# forward pass
a = model(**a )
a = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
a = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
a = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
a = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
a = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
a = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
a = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
a = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
a = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , a , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(a ).mkdir(exist_ok=a )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(a )
print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(a )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F"""MIT/{model_name}""" )
feature_extractor.push_to_hub(F"""MIT/{model_name}""" )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCAmelCase__ = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
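# Hedged invocation sketch (the script name is a placeholder):
#   python convert_ast.py --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted --push_to_hub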
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Dict =['pixel_values']
def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Dict , ):
'''simple docstring'''
super().__init__(**__lowercase )
__a = size if size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase )
__a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase , default_to_square=__lowercase , param_name="""crop_size""" )
__a = do_resize
__a = do_rescale
__a = do_normalize
__a = do_center_crop
__a = crop_size
__a = size
__a = resample
__a = rescale_factor
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "shortest_edge" in size:
__a = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__a = (size["""height"""], size["""width"""])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : str , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str ):
'''simple docstring'''
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ):
'''simple docstring'''
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Tuple , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : int = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = do_resize if do_resize is not None else self.do_resize
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(__lowercase , param_name="""crop_size""" , default_to_square=__lowercase )
__a = resample if resample is not None else self.resample
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(__lowercase )
if not is_batched(__lowercase ):
__a = [images]
if not valid_images(__lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__a = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
__a = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
__a = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
__a = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
__a = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__a = {"""pixel_values""": images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
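# Hedged usage sketch (the processor class above is renamed to
# SCREAMING_SNAKE_CASE in this file; the input shape is illustrative):
#   import numpy as np
#   processor = SCREAMING_SNAKE_CASE()
#   batch = processor(np.zeros((256, 256, 3), dtype=np.uint8), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) after resize + center crop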
| 302
| 0