| code (string, lengths 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, lengths 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
def hexagonal_numbers(length: int) -> list[int]:
    # check the type before comparing, so a non-int argument cannot raise a TypeError
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 101 |
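A minimal sanity check, assuming the corrected `hexagonal_numbers` above: the n-th hexagonal number is n(2n - 1), so the first five values are easy to verify by hand.

assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]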
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        ) | 67 | 0 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    # plain recursion: count the ordered ways to reach `target` using elements of `array`
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    # same count, memoised over dp_array (-1 marks "not computed yet")
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    # iterative bottom-up variant of the same count
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 102 |
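The three variants above count ordered ways to reach the target, so they should agree. For the `__main__` values, f(5) over {1, 2, 5} is 9: 1+1+1+1+1, the four arrangements of 1+1+1+2, the three arrangements of 1+2+2, and 5 itself.

assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9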
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 67 | 0 |
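A worked case for the prefix function above: in "aabcdaabc" the trailing "aabc" repeats the leading one, so the border lengths climb to 4.

assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4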
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str) -> None:
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(layer_index, "_query_dense/bias", self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(layer_index, "_key_dense/bias", self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(layer_index, "_value_dense/bias", self_attn.value.bias.data.shape)

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(layer_index, "_output_dense/bias", self_output.dense.bias.data.shape)

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 103 |
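For orientation, a hypothetical invocation of the conversion script above; the script file name and paths are illustrative placeholders, not taken from this row.

# python convert_token_dropping_bert_checkpoint.py \
#     --tf_checkpoint_path ./tf2_checkpoint \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./converted_model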
def find_min(arr: list[int]) -> int:
    # minimum difference between the sums of a two-way partition of `arr`
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # subset-sum recurrence: either skip arr[i - 1] ...
            dp[i][j] = dp[i - 1][j]
            # ... or take it, if it fits
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff | 67 | 0 |
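A worked case for `find_min` above, assuming the corrected skip-branch `dp[i - 1][j]`: for [1, 2, 7] the total is 10 and the best reachable half-sum is 1 + 2 = 3, so the minimum difference is 10 - 2 * 3 = 4.

assert find_min([1, 2, 7]) == 4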
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    main()
| 104 |
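A hypothetical command line for the script above; the checkpoint names are illustrative, and any image encoder plus autoregressive text decoder with compatible configs should fit the same argument shape.

# python create_model_from_encoder_decoder_models.py \
#     --output_dir ./vit-gpt2 \
#     --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#     --decoder_model_name_or_path gpt2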
from manim import *
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait() | 67 | 0 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return the peak value of a list that first increases and then decreases."""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 105 |
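A quick check of the divide-and-conquer above on a list that rises and then falls: the middle window [4, 5, 4] already contains the peak, so the first branch returns immediately.

assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5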
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A ) | 67 | 0 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any k consecutive elements, using a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # slide the window one step: drop array[i], add array[i + k]
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}") | 106 |
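One deterministic check of the sliding window above: among the windows of four in [1, 4, 2, 10, 2, 3, 1, 0, 20], the best is 3 + 1 + 0 + 20 = 24.

assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24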
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 67 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 107 |
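A usage sketch for the processor above; the checkpoint name and image file are assumptions for illustration, so the lines are left commented.

# from PIL import Image
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# inputs = processor(images=Image.open("cat.png"), text="a photo of a cat", return_tensors="pt")
# `inputs` carries the tokenizer fields plus pixel_values/pixel_mask from the image processor.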
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution()) | 67 | 0 |
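A hand-checkable case for the helpers above: 4151 = 4^5 + 1^5 + 5^5 + 1^5 = 1024 + 1 + 3125 + 1, so it is one of the numbers `solution` sums.

assert digits_fifth_powers_sum(4151) == 4151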
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats())) | 108 |
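An offline sketch of how the namedtuple feeds the format string above; the figures are invented, since the real call hits the network.

sample = covid_data(cases="104,512", deaths="2,312", recovered="98,000")
print(fmt.format(*sample))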
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }") | 67 | 0 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = "▁"
a = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : str = BertGenerationTokenizer
__UpperCamelCase : Tuple = False
__UpperCamelCase : int = True
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """<s>"""
__SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) ,lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""<pad>""" )
self.assertEqual(len(lowerCamelCase ) ,1002 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,1000 )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[285, 46, 10, 170, 382] ,)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase ,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ,)
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """Hello World!"""
__SCREAMING_SNAKE_CASE = [1_8536, 2260, 101]
self.assertListEqual(lowerCamelCase ,self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__SCREAMING_SNAKE_CASE = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(lowerCamelCase ,self.big_tokenizer.encode(lowerCamelCase ) )
@require_torch
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__SCREAMING_SNAKE_CASE = list(self.big_tokenizer.get_vocab().keys() )[:10]
__SCREAMING_SNAKE_CASE = """ """.join(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.big_tokenizer.encode_plus(lowerCamelCase ,return_tensors="""pt""" ,return_token_type_ids=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] ,return_tensors="""pt""" ,return_token_type_ids=lowerCamelCase )
__SCREAMING_SNAKE_CASE = BertGenerationConfig()
__SCREAMING_SNAKE_CASE = BertGenerationEncoder(lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase )
model(**lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase ,model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" ,revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" ,)
| 109 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
"""simple docstring"""
def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
_lowercase = parent
_lowercase = batch_size
_lowercase = encoder_seq_length
_lowercase = decoder_seq_length
# For common tests
_lowercase = self.decoder_seq_length
_lowercase = is_training
_lowercase = use_attention_mask
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = d_ff
_lowercase = relative_attention_num_buckets
_lowercase = dropout_rate
_lowercase = initializer_factor
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = decoder_start_token_id
_lowercase = None
_lowercase = decoder_layers
def __UpperCAmelCase ( self : Dict ) -> Dict:
return TaConfig.from_pretrained('google/umt5-base' )
def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
if attention_mask is None:
_lowercase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowercase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
if decoder_head_mask is None:
_lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
if cross_attn_head_mask is None:
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=__A )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
_lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
_lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowercase = input_ids.clamp(self.pad_token_id + 1 )
_lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowercase = self.get_config()
_lowercase = config.num_attention_heads
_lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
return config, input_dict
def __UpperCAmelCase ( self : Dict ) -> str:
_lowercase , _lowercase = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Dict ) -> Any:
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
_lowercase = UMTaModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(
input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
_lowercase = model(input_ids=__A ,decoder_input_ids=__A )
_lowercase = result.last_hidden_state
_lowercase = result.past_key_values
_lowercase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__A ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
_lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
# first forward pass
_lowercase = model(__A ,use_cache=__A )
_lowercase = model(__A )
_lowercase = model(__A ,use_cache=__A )
self.parent.assertTrue(len(__A ) == len(__A ) )
self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
_lowercase , _lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append to next input_ids and
_lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowercase = model(__A )['last_hidden_state']
_lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
# select random slice
_lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
_lowercase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
_lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
_lowercase = model(**__A )['last_hidden_state']
self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def __UpperCAmelCase ( self : int ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def __UpperCAmelCase ( self : List[Any] ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__A )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = config_and_inputs[0]
_lowercase = UMTaForConditionalGeneration(__A ).eval()
model.to(__A )
_lowercase = {
'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
}
for attn_name, (name, mask) in zip(__A ,head_masking.items() ):
_lowercase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=__A )
_lowercase = model.generate(
config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def __UpperCAmelCase ( self : str ) -> List[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def __UpperCAmelCase ( self : int ) -> List[str]:
_lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
_lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
_lowercase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
# fmt: off
_lowercase = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__A ,__A )
_lowercase = model.generate(input_ids.to(__A ) )
_lowercase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING) | 67 | 0 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    # Classic "abbreviation" DP: can string `a` be turned into the
    # all-uppercase string `b` by capitalizing some of its lowercase letters
    # and then deleting every remaining lowercase letter?
    n = len(a)
    m = len(b)
    # dp[i][j] is True when the first i characters of `a` can produce the
    # first j characters of `b`.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
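    # Hedged usage sketch (inputs are illustrative, not from this file):
    # capitalize 'a' and 'c' in "daBcd" and delete both 'd's to obtain "ABC".
    assert abbr("daBcd", "ABC") is True
    assert abbr("dBcd", "ABC") is False  # the required 'A' can never be produced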
| 110 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because it should only be run when releasing a minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        smp_options = {
            'enabled': True,
            'processes_per_host': 8,
        }
        mpi_options = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version='py36',
        )
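        # Hedged note on the distribution config above: smdistributed model
        # parallelism runs 8 processes per host, splits the model into 4
        # pipeline partitions with an interleaved schedule, and slices each
        # batch into 4 microbatches as it flows through the pipeline.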
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)] )
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from the SageMaker job; this includes starting, preprocessing and stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        )
        # assert kpis
        assert train_runtime <= self.results['train_runtime']
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump test results into a json file to share in the PR
        with open(f"{estimator.latest_training_job.name}.json", 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile) | 67 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def is_chinese(word):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
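# Hedged illustration (inputs are made up, not from the original script):
#   get_chinese_word(['我们', '在', '北京', 'ok', '了'])
# keeps only the multi-character, all-Chinese segments, i.e. {'我们', '北京'},
# returned as a list in arbitrary (set) order.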
def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
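# Hedged worked example (tokens are made up): with chinese_word_set = {'北京'},
#   add_sub_symbol(['北', '京', '欢', '迎'], {'北京'})
# rewrites the continuation pieces of the matched word as subwords, giving
#   ['北', '##京', '欢', '迎']
# which is exactly the marking that whole-word masking consumes downstream.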
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords starting with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save the Chinese token's position
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)BERT(a), the best result comes from RoBERTa-wwm-ext
    # (https://github.com/ymcui/Chinese-BERT-wwm). To fine-tune these models we
    # have to use the same word segmenter: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
    main(args)
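    # Hedged CLI sketch (paths are placeholders for local resources):
    #   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
    #       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt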
| 73 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super().outputs
else:
_lowercase = super(__A ,self ).outputs
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
# Generate decoder inputs
_lowercase = seq_length if not self.use_past else 1
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
_lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowercase = dict(**__A ,**__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
_lowercase = common_inputs['decoder_input_ids'].shape[1]
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = decoder_seq_length + 3
_lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
_lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowercase , _lowercase = self.num_layers
_lowercase = min(__A ,__A )
_lowercase = max(__A ,__A ) - min_num_layers
_lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A ,__A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
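        # Hedged note: each past_key_values entry appended above is a 4-tuple
        # of zero tensors -- decoder self-attention key/value followed by
        # cross-attention key/value -- sized so ONNX tracing sees a realistic
        # cache for every layer.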
def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase , _lowercase = self.num_layers
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = common_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase = tokenizer.num_special_tokens_to_add(__A )
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
return common_inputs
def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
elif self.task == "causal-lm":
_lowercase = self._generate_dummy_inputs_for_causal_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
else:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
else:
_lowercase = super(__A ,self )._flatten_past_key_values_(
__A ,__A ,__A ,__A ) | 67 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
SCREAMING_SNAKE_CASE_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
SCREAMING_SNAKE_CASE_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : int = '''whisper'''
__SCREAMING_SNAKE_CASE : str = ['''past_key_values''']
__SCREAMING_SNAKE_CASE : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs
    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, sampling_rate=22050, time_duration=5.0, frequency=220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework,
            sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
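# Hedged usage sketch (class names as in transformers; values illustrative):
#   config = WhisperConfig(d_model=256, encoder_layers=6, decoder_layers=6)
#   onnx_config = WhisperOnnxConfig(config, task='default')
#   print(onnx_config.inputs)  # OrderedDict starting with 'input_features'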
| 373 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Any ) -> str:
torch.manual_seed(0 )
_lowercase = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase = self.dummy_uncond_unet
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ).images
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ,return_dict=__A )[0]
_lowercase = image[0, -3:, -3:, -1]
_lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
_lowercase = 'google/ncsnpp-celebahq-256'
_lowercase = UNetaDModel.from_pretrained(__A )
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=20 ,generator=__A ,output_type='numpy' ).images
_lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 67 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Optional[Any] = False
def train_command_factory(args: Namespace):
    return TrainCommand(args)
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data", type=str, required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument("--column_label", type=int, default=0, help="Column of the dataset csv file with example labels.")
        train_parser.add_argument("--column_text", type=int, default=1, help="Column of the dataset csv file with example texts.")
        train_parser.add_argument("--column_id", type=int, default=2, help="Column of the dataset csv file with example ids.")
        train_parser.add_argument("--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers).")
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to save the trained model.")
        train_parser.add_argument("--task", type=str, default="text_classification", help="Task to train the model on.")
        train_parser.add_argument("--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model.")
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f'Loading {args.task} pipeline for {args.model}')
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'Loading dataset from {args.train_data}')
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f'Loading validation dataset from {args.validation_data}')
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_tf(self):
        raise NotImplementedError

    def run_torch(self):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
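# Hedged CLI sketch (flags as registered above; the csv path is a placeholder):
#   transformers-cli train --train_data ./train.csv --task text_classification \
#       --model bert-base-uncased --output ./out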
| 692 |
def naive_pattern_search(s: str, pattern: str) -> list:
    # Slide the pattern across `s` one position at a time and compare
    # character by character: O(len(s) * len(pattern)) in the worst case.
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
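# A hedged alternative sketch built on str.find from the standard library; it
# reports the same (possibly overlapping) match positions as the naive scan
# above.
def find_all_with_str_find(s: str, pattern: str) -> list:
    positions = []
    i = s.find(pattern)
    while i != -1:
        positions.append(i)
        i = s.find(pattern, i + 1)
    return positions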
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 67 | 0 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 314 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    # A matrix is Hermitian when it equals its own conjugate transpose.
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    # R(a, v) = (v* a v) / (v* v), where v* is the conjugate transpose of v.
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
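# Hedged check (illustrative, not part of the original module): for an
# eigenpair a @ v == lam * v the quotient recovers the eigenvalue, e.g.
#   rayleigh_quotient(np.diag([1.0, 2.0]), np.array([[0.0], [1.0]]))
# evaluates to array([[2.]]).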
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | 67 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = (PNDMScheduler,)
UpperCAmelCase = (('''num_inference_steps''', 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config
def __UpperCamelCase ( self : List[str] , _a : Optional[Any]=0 , **_a : Optional[int] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =dict(self.forward_default_kwargs )
_SCREAMING_SNAKE_CASE =kwargs.pop('''num_inference_steps''' , __A )
_SCREAMING_SNAKE_CASE =self.dummy_sample
_SCREAMING_SNAKE_CASE =0.1 * sample
_SCREAMING_SNAKE_CASE =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_SCREAMING_SNAKE_CASE =self.get_scheduler_config(**__A )
_SCREAMING_SNAKE_CASE =scheduler_class(**__A )
scheduler.set_timesteps(__A )
# copy over dummy past residuals
_SCREAMING_SNAKE_CASE =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__A )
_SCREAMING_SNAKE_CASE =scheduler_class.from_pretrained(__A )
new_scheduler.set_timesteps(__A )
# copy over dummy past residuals
_SCREAMING_SNAKE_CASE =dummy_past_residuals[:]
_SCREAMING_SNAKE_CASE =scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
_SCREAMING_SNAKE_CASE =new_scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_SCREAMING_SNAKE_CASE =scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
_SCREAMING_SNAKE_CASE =new_scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Optional[int] , _a : Any=0 , **_a : int ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =dict(self.forward_default_kwargs )
_SCREAMING_SNAKE_CASE =kwargs.pop('''num_inference_steps''' , __A )
_SCREAMING_SNAKE_CASE =self.dummy_sample
_SCREAMING_SNAKE_CASE =0.1 * sample
_SCREAMING_SNAKE_CASE =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**__A )
scheduler.set_timesteps(__A )
# copy over dummy past residuals (must be after setting timesteps)
_SCREAMING_SNAKE_CASE =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__A )
_SCREAMING_SNAKE_CASE =scheduler_class.from_pretrained(__A )
# copy over dummy past residuals
new_scheduler.set_timesteps(__A )
# copy over dummy past residual (must be after setting timesteps)
_SCREAMING_SNAKE_CASE =dummy_past_residuals[:]
_SCREAMING_SNAKE_CASE =scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
_SCREAMING_SNAKE_CASE =new_scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_SCREAMING_SNAKE_CASE =scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
_SCREAMING_SNAKE_CASE =new_scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample
        return sample
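        # Hedged note: the two loops above mirror PNDM sampling -- a
        # Runge-Kutta warm-up over scheduler.prk_timesteps followed by plain
        # linear multistep updates over scheduler.plms_timesteps.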
def __UpperCamelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =dict(self.forward_default_kwargs )
_SCREAMING_SNAKE_CASE =kwargs.pop('''num_inference_steps''' , __A )
for scheduler_class in self.scheduler_classes:
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**__A )
_SCREAMING_SNAKE_CASE =self.dummy_sample
_SCREAMING_SNAKE_CASE =0.1 * sample
if num_inference_steps is not None and hasattr(__A , '''set_timesteps''' ):
scheduler.set_timesteps(__A )
elif num_inference_steps is not None and not hasattr(__A , '''set_timesteps''' ):
_SCREAMING_SNAKE_CASE =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_SCREAMING_SNAKE_CASE =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_SCREAMING_SNAKE_CASE =dummy_past_residuals[:]
_SCREAMING_SNAKE_CASE =scheduler.step_prk(__A , 0 , __A , **__A ).prev_sample
_SCREAMING_SNAKE_CASE =scheduler.step_prk(__A , 1 , __A , **__A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_SCREAMING_SNAKE_CASE =scheduler.step_plms(__A , 0 , __A , **__A ).prev_sample
_SCREAMING_SNAKE_CASE =scheduler.step_plms(__A , 1 , __A , **__A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__A )
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config(steps_offset=1 )
_SCREAMING_SNAKE_CASE =scheduler_class(**__A )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
self.check_over_configs(beta_start=__A , beta_end=__A )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__A )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=__A )
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__A )
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =27
for scheduler_class in self.scheduler_classes:
_SCREAMING_SNAKE_CASE =self.dummy_sample
_SCREAMING_SNAKE_CASE =0.1 * sample
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**__A )
scheduler.set_timesteps(__A )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_SCREAMING_SNAKE_CASE =scheduler.step_prk(__A , __A , __A ).prev_sample
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaises(__A ):
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**__A )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.full_loop()
_SCREAMING_SNAKE_CASE =torch.sum(torch.abs(__A ) )
_SCREAMING_SNAKE_CASE =torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.full_loop(prediction_type='''v_prediction''' )
_SCREAMING_SNAKE_CASE =torch.sum(torch.abs(__A ) )
_SCREAMING_SNAKE_CASE =torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.full_loop(set_alpha_to_one=__A , beta_start=0.01 )
_SCREAMING_SNAKE_CASE =torch.sum(torch.abs(__A ) )
_SCREAMING_SNAKE_CASE =torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.full_loop(set_alpha_to_one=__A , beta_start=0.01 )
_SCREAMING_SNAKE_CASE =torch.sum(torch.abs(__A ) )
_SCREAMING_SNAKE_CASE =torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3 | 691 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
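    # Hedged worked example of the shortest-edge rule above (values
    # illustrative): with size['shortest_edge'] == 18, a 30x60 (h x w) image
    # resizes to 18x36 -- the short side is pinned at 18 and the long side
    # keeps the aspect ratio.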
@require_torch
@require_vision
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A ,'image_mean' ) )
self.assertTrue(hasattr(__A ,'image_std' ) )
self.assertTrue(hasattr(__A ,'do_normalize' ) )
self.assertTrue(hasattr(__A ,'do_resize' ) )
self.assertTrue(hasattr(__A ,'do_rescale' ) )
self.assertTrue(hasattr(__A ,'do_pad' ) )
self.assertTrue(hasattr(__A ,'size' ) )
def __UpperCAmelCase ( self : str ) -> List[str]:
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,__A )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
pass
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowercase = DetaImageProcessor(format='coco_panoptic' )
_lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify masks
_lowercase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) ) | 67 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_UpperCamelCase = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_UpperCamelCase = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_UpperCamelCase = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        """simple docstring"""
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
return out
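# A minimal usage sketch with precomputed features (assumes the `mauve-text`
# package is installed; the arrays below are synthetic stand-ins, not real
# model features):
#
#   import numpy as np
#   from mauve import compute_mauve
#   rng = np.random.default_rng(0)
#   p_feats = rng.normal(size=(200, 32))
#   q_feats = rng.normal(loc=0.3, size=(200, 32))
#   out = compute_mauve(p_features=p_feats, q_features=q_feats, num_buckets=8, verbose=False)
#   print(out.mauve)  # in (0, 1]; higher means the two feature distributions are closer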
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    """simple docstring"""
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.')
        train_parser.add_argument(
            '--train_data', type=str, required=True, help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.', )
        train_parser.add_argument(
            '--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.')
        train_parser.add_argument(
            '--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.')
        train_parser.add_argument(
            '--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.')
        train_parser.add_argument(
            '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).')
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.')
        train_parser.add_argument(
            '--validation_split', type=float, default=0.1, help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.', )
        train_parser.add_argument('--output', type=str, default='./', help='path to save the trained model.')
        train_parser.add_argument(
            '--task', type=str, default='text_classification', help='Task to train the model on.')
        train_parser.add_argument(
            '--model', type=str, default='bert-base-uncased', help='Model\'s name or path to stored model.')
        train_parser.add_argument('--train_batch_size', type=int, default=32, help='Batch size for training.')
        train_parser.add_argument('--valid_batch_size', type=int, default=64, help='Batch size for validation.')
        train_parser.add_argument('--learning_rate', type=float, default=3e-5, help='Learning rate.')
        train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.')
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger('transformers-cli/training')
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(F"""Loading {args.task} pipeline for {args.model}""")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F"""Loading dataset from {args.train_data}""")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(F"""Loading validation dataset from {args.validation_data}""")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch(self):
        raise NotImplementedError
    def run_tf(self):
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
        self.pipeline.save_pretrained(self.output)
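# A minimal usage sketch (file names and values below are hypothetical):
#
#   transformers-cli train --train_data train.csv --column_label 0 --column_text 1 \
#       --model bert-base-uncased --output ./trained_model
#
# or programmatically:
#
#   from argparse import Namespace
#   args = Namespace(train_data="train.csv", column_label=0, column_text=1, column_id=2,
#                    skip_first_row=False, validation_data="", validation_split=0.1,
#                    output="./trained_model", task="text_classification",
#                    model="bert-base-uncased", train_batch_size=32, valid_batch_size=64,
#                    learning_rate=3e-5, adam_epsilon=1e-8)
#   TrainCommand(args).run()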
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'''cls_token''': '''<s>'''}
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def _snake_case ( self ):
lowercase__: str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__A ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _snake_case ( self ):
lowercase__: Any = self.tokenizer_class.from_pretrained('''roberta-base''' )
lowercase__: int = tokenizer.encode('''sequence builders''' , add_special_tokens=__A )
lowercase__: Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__A )
lowercase__: Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__A , add_prefix_space=__A )
lowercase__: Tuple = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__A , add_prefix_space=__A )
lowercase__: Any = tokenizer.build_inputs_with_special_tokens(__A )
lowercase__: Optional[int] = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _snake_case ( self ):
lowercase__: Any = self.get_tokenizer()
lowercase__: Tuple = '''Encode this sequence.'''
lowercase__: str = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowercase__: List[str] = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__A , __A )
lowercase__: Optional[int] = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
lowercase__: str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__A , __A )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowercase__: Dict = tokenizer.encode(__A , add_special_tokens=__A )
lowercase__: str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__A , __A )
# Testing spaces after special tokens
lowercase__: str = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__A , lstrip=__A , rstrip=__A )} ) # mask token has a left space
lowercase__: str = tokenizer.convert_tokens_to_ids(__A )
lowercase__: str = '''Encode <mask> sequence'''
lowercase__: Union[str, Any] = '''Encode <mask>sequence'''
lowercase__: str = tokenizer.encode(__A )
lowercase__: Union[str, Any] = encoded.index(__A )
lowercase__: Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__A , __A )
lowercase__: Any = tokenizer.encode(__A )
lowercase__: Optional[int] = encoded.index(__A )
lowercase__: int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__A , __A )
def _snake_case ( self ):
pass
def _snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__A , **__A )
lowercase__: Tuple = self.tokenizer_class.from_pretrained(__A , **__A )
lowercase__: Tuple = '''A, <mask> AllenNLP sentence.'''
lowercase__: List[str] = tokenizer_r.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
lowercase__: int = tokenizer_p.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowercase__: Any = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowercase__: Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _snake_case ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase__: List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowercase__: Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase__: Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __A )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __A )
self.assertEqual(post_processor_state['''trim_offsets'''] , __A )
def _snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                text_of_1_token = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"""{text_of_1_token} {text_of_1_token}"""
lowercase__: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowercase__: List[Any] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , )
lowercase__: Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowercase__: int = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , )
lowercase__: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowercase__: int = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , )
lowercase__: Tuple = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowercase__: Optional[int] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , )
lowercase__: List[str] = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase__: Any = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowercase__: int = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )) , )
lowercase__: str = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowercase__: Tuple = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , )
lowercase__: int = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowercase__: List[Any] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , )
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type='hybrid')
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:  # fixed: `if "nyu" or ...` was always truthy
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', '')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
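        # e.g. "refinenet4" -> "fusion_stage.layers.0" and "refinenet1" -> "fusion_stage.layers.3"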
_lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
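# Shape sketch for the split above: with hidden size h, the fused qkv weight has
# shape (3h, h); rows [0:h] hold the query, [h:2h] the key, and [2h:3h] the value.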
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode='bicubic', align_corners=False, )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"""Saving model to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
import math


def sieve(n):
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
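# Quick sanity check for the segmented sieve (a minimal sketch; `_is_prime` is a
# plain trial-division helper introduced here only for testing):
def _is_prime(k):
    if k < 2:
        return False
    return all(k % d != 0 for d in range(2, int(math.sqrt(k)) + 1))


assert sieve(100) == [k for k in range(2, 101) if _is_prime(k)]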
print(sieve(10**6))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_mae"""] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit_mae"""] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
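# Usage note: _LazyModule defers importing the torch/TF submodules until one of
# the exported names is first accessed, so a bare `import transformers` stays cheap.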
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ['the', 'be', 'to', 'of', 'and', 'in', 'that', 'have']
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def _UpperCamelCase ( lowercase__ = "p059_cipher.txt" ):
__SCREAMING_SNAKE_CASE : int = 42
__SCREAMING_SNAKE_CASE : Tuple = 42
__SCREAMING_SNAKE_CASE : List[Any] = 42
__SCREAMING_SNAKE_CASE : Union[str, Any] = 42
__SCREAMING_SNAKE_CASE : List[Any] = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding='''utf-8''' )
__SCREAMING_SNAKE_CASE : List[Any] = [int(snake_case__ ) for number in data.strip().split(''',''' )]
__SCREAMING_SNAKE_CASE : int = filter_valid_chars(snake_case__ )
for common_word in COMMON_WORDS:
__SCREAMING_SNAKE_CASE : Dict = filter_common_word(snake_case__ , snake_case__ )
if len(snake_case__ ) == 1:
break
__SCREAMING_SNAKE_CASE : Union[str, Any] = possibles[0]
return sum(ord(snake_case__ ) for char in decoded_text )
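# XOR with a repeating key is its own inverse, so try_key() recovers the
# plaintext when handed the correct key (a minimal round-trip sketch):
#
#   key = (ord("a"), ord("b"), ord("c"))
#   cipher = [p ^ k for p, k in zip((ord(c) for c in "hello"), cycle(key))]
#   assert try_key(cipher, key) == "hello"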
if __name__ == "__main__":
print(f"""{solution() = }""")
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Return the maximum sum of non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
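# Worked example: for [3, 2, 5, 10, 7] the optimum is 3 + 5 + 7 = 15,
# since taking 10 forces dropping both 5 and 7 (at most 3 + 10 = 13).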
if __name__ == "__main__":
import doctest
doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class TransfoXLConfig(PretrainedConfig):
"""simple docstring"""
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(self, vocab_size=26_7735, cutoffs=[2_0000, 4_0000, 20_0000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs, ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
snake_case__ : Optional[Any] = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use GLPNImageProcessor instead.""", FutureWarning, )
        super().__init__(*args, **kwargs)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''dpr'''
    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
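# Example behavior (positions are 0-indexed from the least significant bit):
#   set_bit(0b1101, 1)    -> 0b1111 (15)
#   clear_bit(0b1111, 1)  -> 0b1101 (13)
#   flip_bit(0b1101, 1)   -> 0b1111 (15)
#   is_bit_set(0b1010, 3) -> True
#   get_bit(0b1010, 1)    -> 1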
if __name__ == "__main__":
import doctest
doctest.testmod()
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ))
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ))
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ))
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
return arr
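# For reference, a sequential odd-even transposition sort (a minimal sketch of
# the same algorithm without processes or pipes; n phases suffice to sort):
def odd_even_transposition_sequential(arr):
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr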
def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
    main()
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    """simple docstring"""
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    """simple docstring"""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    """simple docstring"""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    """simple docstring"""
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 373 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''
    def __init__(self, vocab_size=5_0358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs,):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs,)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    """ONNX export configuration for BigBird."""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]) | 67 | 0 |
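# Hedged usage sketch for the BigBirdConfig defined above (not part of the
# original file; assumes the `transformers` package with BigBird support, and
# the kwargs simply mirror the defaults shown in __init__):
from transformers import BigBirdConfig, BigBirdModel
config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
model = BigBirdModel(config)  # randomly initialized encoder with block-sparse attention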
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/maskformer-swin-base-ade': (
        'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    """Configuration for MaskFormer: a backbone config plus a DETR decoder config."""
    model_type = 'maskformer'
    attribute_map = {'hidden_size': 'mask_feature_size'}
    backbones_supported = ['resnet', 'swin']
    decoders_supported = ['detr']
    def __init__(self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1, use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None, decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0, dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0, output_auxiliary_logits: Optional[bool] = None, **kwargs,):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"],)
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
                f'Supported model types: {",".join(self.backbones_supported)}')
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'Transformer Decoder {decoder_type} not supported, please use one of'
                    f' {",".join(self.decoders_supported)}')
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        """Instantiate a MaskFormerConfig from a backbone config and a decoder config."""
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs,)
    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['decoder_config'] = self.decoder_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 692 |
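# Hedged sketch of building the MaskFormer config above from explicit parts via
# the classmethod (assumes `transformers` exposes these config classes):
from transformers import DetrConfig, MaskFormerConfig, SwinConfig
backbone_config = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
decoder_config = DetrConfig()
config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone_config, decoder_config)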
def prefix_function(input_string: str) -> list:
    """
    Knuth-Morris-Pratt prefix function: result[i] is the length of the longest
    proper prefix of input_string[: i + 1] that is also its suffix.
    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_string: str) -> int:
    """
    >>> longest_prefix("aabcdaabc")
    4
    """
    return max(prefix_function(input_string))
if __name__ == "__main__":
    import doctest
    doctest.testmod() | 67 | 0 |
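# Hedged application sketch of the prefix function above: classic KMP substring
# search built on prefix_function (the helper name and strings are illustrative).
def kmp_find(pattern: str, text: str) -> int:
    """Return the index of the first occurrence of pattern in text, or -1."""
    combined = pattern + "\x00" + text  # separator assumed absent from both strings
    pi = prefix_function(combined)
    for i in range(len(pattern) + 1, len(combined)):
        if pi[i] == len(pattern):
            return i - 2 * len(pattern)
    return -1
print(kmp_find("abc", "xxabcxx"))  # 2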
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    """Builds small ViT configs and inputs for the tests below."""
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ViT."""
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    """Integration tests against pretrained checkpoints."""
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 314 |
def find_min(arr: list) -> int:
    """Return the minimum difference between the sums of two subsets
    partitioning `arr` (classic subset-sum dynamic programming)."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff | 67 | 0 |
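# Hedged demo of find_min above: [1, 6, 11, 5] splits into {1, 5, 6} and {11},
# so the smallest achievable difference between the two subset sums is 1.
print(find_min([1, 6, 11, 5]))  # 1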
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = 'data2vec-text'
    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]) | 691 |
from manim import *
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait() | 67 | 0 |
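# Hedged usage note for the scene above (class A_): with manim Community
# Edition it can be rendered from a shell, e.g. `manim -pql this_file.py A_`,
# or programmatically as sketched below (the config keys are assumptions):
from manim import tempconfig
with tempconfig({"quality": "low_quality"}):
    A_().render()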
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name: str) -> MobileViTConfig:
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith('deeplabv3_'):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = 'pascal-voc-id2label.json'
    else:
        config.num_labels = 1000
        filename = 'imagenet-1k-id2label.json'
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def UpperCamelCase_( snake_case__: int , snake_case__: Dict=False ) -> List[str]:
for i in range(1 , 6 ):
if f"layer_{i}." in name:
UpperCAmelCase__ = name.replace(f"layer_{i}." , f"encoder.layer.{i - 1}." )
if "conv_1." in name:
UpperCAmelCase__ = name.replace('conv_1.' , 'conv_stem.' )
if ".block." in name:
UpperCAmelCase__ = name.replace('.block.' , '.' )
if "exp_1x1" in name:
UpperCAmelCase__ = name.replace('exp_1x1' , 'expand_1x1' )
if "red_1x1" in name:
UpperCAmelCase__ = name.replace('red_1x1' , 'reduce_1x1' )
if ".local_rep.conv_3x3." in name:
UpperCAmelCase__ = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' )
if ".local_rep.conv_1x1." in name:
UpperCAmelCase__ = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' )
if ".norm." in name:
UpperCAmelCase__ = name.replace('.norm.' , '.normalization.' )
if ".conv." in name:
UpperCAmelCase__ = name.replace('.conv.' , '.convolution.' )
if ".conv_proj." in name:
UpperCAmelCase__ = name.replace('.conv_proj.' , '.conv_projection.' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if f".{i}.{j}." in name:
UpperCAmelCase__ = name.replace(f".{i}.{j}." , f".{i}.layer.{j}." )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if f".{i}.{j}." in name:
UpperCAmelCase__ = name.replace(f".{i}.{j}." , f".{i}." )
if "expand_1x1" in name:
UpperCAmelCase__ = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' )
if "conv_3x3" in name:
UpperCAmelCase__ = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' )
if "reduce_1x1" in name:
UpperCAmelCase__ = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' )
for i in range(2 , 5 ):
if f".global_rep.{i}.weight" in name:
UpperCAmelCase__ = name.replace(f".global_rep.{i}.weight" , '.layernorm.weight' )
if f".global_rep.{i}.bias" in name:
UpperCAmelCase__ = name.replace(f".global_rep.{i}.bias" , '.layernorm.bias' )
if ".global_rep." in name:
UpperCAmelCase__ = name.replace('.global_rep.' , '.transformer.' )
if ".pre_norm_mha.0." in name:
UpperCAmelCase__ = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' )
if ".pre_norm_mha.1.out_proj." in name:
UpperCAmelCase__ = name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' )
if ".pre_norm_ffn.0." in name:
UpperCAmelCase__ = name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' )
if ".pre_norm_ffn.1." in name:
UpperCAmelCase__ = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' )
if ".pre_norm_ffn.4." in name:
UpperCAmelCase__ = name.replace('.pre_norm_ffn.4.' , '.output.dense.' )
if ".transformer." in name:
UpperCAmelCase__ = name.replace('.transformer.' , '.transformer.layer.' )
if ".aspp_layer." in name:
UpperCAmelCase__ = name.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in name:
UpperCAmelCase__ = name.replace('.aspp_pool.' , '.' )
if "seg_head." in name:
UpperCAmelCase__ = name.replace('seg_head.' , 'segmentation_head.' )
if "segmentation_head.classifier.classifier." in name:
UpperCAmelCase__ = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' )
if "classifier.fc." in name:
UpperCAmelCase__ = name.replace('classifier.fc.' , 'classifier.' )
elif (not base_model) and ("segmentation_head." not in name):
UpperCAmelCase__ = 'mobilevit.' + name
return name
def UpperCamelCase_( snake_case__: int , snake_case__: Dict , snake_case__: Tuple=False ) -> Optional[int]:
if base_model:
UpperCAmelCase__ = ''
else:
UpperCAmelCase__ = 'mobilevit.'
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ = orig_state_dict.pop(snake_case__ )
if key[:8] == "encoder.":
UpperCAmelCase__ = key[8:]
if "qkv" in key:
UpperCAmelCase__ = key.split('.' )
UpperCAmelCase__ = int(key_split[0][6:] ) - 1
UpperCAmelCase__ = int(key_split[3] )
UpperCAmelCase__ = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}" )
UpperCAmelCase__ = layer.transformer.layer[transformer_num].attention.attention.all_head_size
UpperCAmelCase__ = (
f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
)
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[dim : dim * 2, :]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = val
return orig_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def UpperCamelCase_( snake_case__: int , snake_case__: Tuple , snake_case__: List[str] , snake_case__: Tuple=False ) -> Optional[Any]:
UpperCAmelCase__ = get_mobilevit_config(snake_case__ )
# load original state_dict
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )
# load 🤗 model
if mobilevit_name.startswith('deeplabv3_' ):
UpperCAmelCase__ = MobileViTForSemanticSegmentation(snake_case__ ).eval()
else:
UpperCAmelCase__ = MobileViTForImageClassification(snake_case__ ).eval()
UpperCAmelCase__ = convert_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase__ = image_processor(images=prepare_img() , return_tensors='pt' )
UpperCAmelCase__ = model(**snake_case__ )
UpperCAmelCase__ = outputs.logits
if mobilevit_name.startswith('deeplabv3_' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
UpperCAmelCase__ = torch.tensor(
[
[[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]],
[[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]],
[[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
UpperCAmelCase__ = torch.tensor(
[
[[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]],
[[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]],
[[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
UpperCAmelCase__ = torch.tensor(
[
[[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]],
[[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]],
[[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]],
] )
else:
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 )
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
UpperCAmelCase__ = torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] )
elif mobilevit_name == "mobilevit_xs":
UpperCAmelCase__ = torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] )
elif mobilevit_name == "mobilevit_xxs":
UpperCAmelCase__ = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] )
else:
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
UpperCAmelCase__ = {
'mobilevit_s': 'mobilevit-small',
'mobilevit_xs': 'mobilevit-x-small',
'mobilevit_xxs': 'mobilevit-xx-small',
'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
}
print('Pushing to the hub...' )
UpperCAmelCase__ = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case__ , organization='apple' )
model.push_to_hub(snake_case__ , organization='apple' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 146 |
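# Hedged example invocation of the conversion script above (the script file
# name and checkpoint path are hypothetical; the flags come from the argparse
# definition at the bottom of the script):
#   python convert_mobilevit.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./weights/mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small \
#       --push_to_hub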
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """Builds TimmBackbone configs and inputs for the tests below."""
    def __init__(self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True,):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone,)
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14),)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for the timm-backed backbone wrapper."""
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A ) | 67 | 0 |
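# Hedged usage sketch mirroring the equivalence test above (assumes the `timm`
# package and network access; the checkpoint names come from the test itself):
from transformers import AutoBackbone
timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
print(timm_backbone.channels, hf_backbone.channels)  # matching channel counts per stage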
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name) -> None:
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 586 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_nllb_moe': [
        'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'NllbMoeConfig',
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_nllb_moe'] = [
        'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'NllbMoeForConditionalGeneration',
        'NllbMoeModel',
        'NllbMoePreTrainedModel',
        'NllbMoeTop2Router',
        'NllbMoeSparseMLP',
    ]
if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 67 | 0 |
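# With the lazy module above in place, torch-backed classes only import on
# first attribute access; a hedged end-user example:
from transformers.models.nllb_moe import NllbMoeConfig  # resolved lazily
print(NllbMoeConfig().model_type)  # expected: 'nllb-moe'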
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Return a random password drawn from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    pass  # Put your code here...
def random_letters(chars_incl, i):
    pass  # Put your code here...
def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    # Passwords should contain UPPERCASE, lowercase, numbers, and special characters
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:", alternative_password_generator(chars_incl, length),)
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
    main() | 162 |
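# Hedged demo of the helpers above; the output differs per run because
# `secrets` is cryptographically random.
pw = password_generator(12)
print(pw, is_strong_password(pw))  # the strength check can fail by chance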
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    """Project Euler 30: sum of all numbers that can be written as the sum of
    the fifth powers of their digits (single-digit numbers excluded)."""
    return sum(
        number
        for number in range(1000, 100_0000)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
    print(solution()) | 67 | 0 |
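# Sanity check for the helper above: 4150 is a fixed point, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150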
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.lineara = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linearb = nn.Linear(4, 5)  # second linear layer, distinct from self.lineara
    def forward(self, x):
        return self.linearb(self.batchnorm(self.lineara(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ModelForTest()
__SCREAMING_SNAKE_CASE : Optional[int] = ModelHook()
add_hook_to_module(__A , __A )
self.assertEqual(test_model._hf_hook , __A )
self.assertTrue(hasattr(__A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__A )
self.assertFalse(hasattr(__A , '''_hf_hook''' ) )
self.assertFalse(hasattr(__A , '''_old_forward''' ) )
def __magic_name__( self :Optional[int] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = ModelForTest()
__SCREAMING_SNAKE_CASE : List[str] = ModelHook()
add_hook_to_module(__A , __A )
add_hook_to_module(__A , __A , append=__A )
self.assertEqual(isinstance(test_model._hf_hook , __A ) , __A )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__A )
self.assertFalse(hasattr(__A , '''_hf_hook''' ) )
self.assertFalse(hasattr(__A , '''_old_forward''' ) )
def __magic_name__( self :Optional[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : List[Any] = ModelForTest()
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE : Tuple = test_model(x + 1 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = test_model(x + 2 )
__SCREAMING_SNAKE_CASE : int = PreForwardHook()
add_hook_to_module(__A , __A )
__SCREAMING_SNAKE_CASE : Any = test_model(__A )
self.assertTrue(torch.allclose(__A , __A , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__SCREAMING_SNAKE_CASE : List[str] = PreForwardHook()
add_hook_to_module(__A , __A )
__SCREAMING_SNAKE_CASE : Any = test_model(__A )
self.assertTrue(torch.allclose(__A , __A , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__SCREAMING_SNAKE_CASE : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__A , __A )
__SCREAMING_SNAKE_CASE : str = test_model(__A )
assert torch.allclose(__A , __A , atol=1E-5 )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Any = ModelForTest()
__SCREAMING_SNAKE_CASE : Optional[int] = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE : Any = test_model(__A )
__SCREAMING_SNAKE_CASE : str = PostForwardHook()
add_hook_to_module(__A , __A )
__SCREAMING_SNAKE_CASE : Optional[int] = test_model(__A )
self.assertTrue(torch.allclose(__A , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__SCREAMING_SNAKE_CASE : str = PostForwardHook()
add_hook_to_module(__A , __A )
__SCREAMING_SNAKE_CASE : List[Any] = test_model(__A )
self.assertTrue(torch.allclose(__A , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__SCREAMING_SNAKE_CASE : Any = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__A , __A )
__SCREAMING_SNAKE_CASE : Any = test_model(__A )
assert torch.allclose(__A , output + 2 , atol=1E-5 )
def __magic_name__( self :Any ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = ModelForTest()
__SCREAMING_SNAKE_CASE : Tuple = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE : int = test_model(__A )
__SCREAMING_SNAKE_CASE : List[str] = PostForwardHook()
add_hook_to_module(__A , __A )
__SCREAMING_SNAKE_CASE : List[Any] = test_model(__A )
self.assertTrue(torch.allclose(__A , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : Any = test_model(__A )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__SCREAMING_SNAKE_CASE : List[str] = torch.randn(2 , 3 )
__SCREAMING_SNAKE_CASE : Any = model(__A )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__A , AlignDevicesHook(io_same_device=__A ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(2 , 3 ).to(0 )
__SCREAMING_SNAKE_CASE : int = model(__A )
self.assertEqual(output.device , torch.device(0 ) )
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
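    # Note: with offload=True the hooks replace each parameter with a "meta"
    # tensor and keep the real values in the hook's weights map; the weights are
    # streamed to the execution device only for the duration of each forward,
    # which is exactly what the meta/cpu device assertions above verify.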
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 696 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
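
# Quick sanity check: 3797 is truncatable from both ends, since
# 3797, 797, 97, 7 and 379, 37, 3 are all prime.
assert all(is_prime(part) for part in list_truncated_nums(3797))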
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""") | 67 | 0 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    """Convert an original latent-diffusion checkpoint into a diffusers LDMPipeline."""
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
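    # Example invocation (script name and all paths are placeholders):
    #   python conversion_ldm_uncond.py --checkpoint_path model.ckpt \
    #       --config_path config.yaml --output_path ./ldm_pipeline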
| 398 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9,
        is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1,
        initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0,
        scope=None, decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
        head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
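    # The slice comparison in the method above is the standard KV-cache check:
    # decoding one new token with past_key_values must reproduce the hidden
    # state obtained from a full forward pass over the concatenated sequence.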
    def create_and_check_model_fp16_forward(
        self, config, input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 67 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
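# Each try/except above registers a backend's symbols only when its optional
# dependency is installed; _LazyModule below then resolves attributes on first
# access instead of importing every backend at module load time.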
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 278 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)] )
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 67 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
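# Background for the "l0" branch above: stretching sigmoid(scores) from (0, 1)
# to (l, r) = (-0.1, 1.1) and clamping back to [0, 1] is the hard-concrete
# reparameterization used in L0 regularization, and it yields exact zeros and
# ones in the mask rather than only values strictly between 0 and 1.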
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
            "For `sigmoied_threshold`, it is the threshold \\tau against which the (sigmoied) scores are compared. "
            "Not needed for `l0`."
        ),
    )
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
        help="Folder where the pruned model will be saved (defaults to `bertarized_<model_name_or_path>` next to the original)",
)
    args = parser.parse_args()
main(args)
| 73 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
        encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 67 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 373 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
@property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
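    # KarrasVeScheduler implements the stochastic sampler of Karras et al.
    # (2022), "Elucidating the Design Space of Diffusion-Based Generative
    # Models": sigmas follow a rho-spaced schedule and churn noise is added
    # before each Euler-like step, so reseeding the generator as done above
    # makes the two runs directly comparable.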
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 67 | 0 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
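
# Note: a bitonic network only sorts sequences whose length is a power of two;
# e.g. bitonic_sort(arr, 0, 4, 1) on arr = [3, 1, 4, 2] yields [1, 2, 3, 4],
# while a 6-element input would first need padding to length 8.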
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 692 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
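
# The scan above is O(len(s) * len(pattern)) comparisons in the worst case
# (e.g. searching "AAAB" inside "AAAA...A"); KMP or the Z-algorithm reduce
# this to O(len(s) + len(pattern)).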
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 67 | 0 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
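
# Illustrative run (names and numbers are made up): with a weight budget of 15
# and Things.value_weight as the key, the ratio-greedy strategy picks items in
# order of value density:
#     foods = build_menu(["bread", "milk", "cheese"], [10, 30, 25], [5, 10, 15])
#     chosen, total_value = greedy(foods, 15, Things.value_weight)
#     # picks milk then bread (ratios 3.0 and 2.0), so total_value == 40.0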
def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
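
# For a Hermitian matrix A, the Rayleigh quotient v* A v / (v* v) is always
# real and lies between the smallest and largest eigenvalues of A, with
# equality exactly when v is a corresponding eigenvector.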
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | 67 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
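    # get_expected_values mirrors the processor's shortest-edge resize rule:
    # the short side is scaled to size["shortest_edge"], the long side keeps
    # the aspect ratio, and batched inputs are compared against the per-batch
    # maximum height and width (the padded shape).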
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
def __UpperCAmelCase ( self : str ) -> List[str]:
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,__A )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
pass
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowercase = DetaImageProcessor(format='coco_panoptic' )
_lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify masks
_lowercase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) ) | 67 | 0 |
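# --- editor's sketch: the shortest-edge resize rule that get_expected_values above encodes ---
# Standalone re-implementation of the aspect-ratio-preserving resize the DETR-style image
# processors use; the function name and the default of 18 are illustrative assumptions.
def expected_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    # scale the shorter side to `shortest_edge`, truncating to int like the test does
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(expected_resize(30, 400))  # (18, 240): the 30px side becomes 18, the other side scales with it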
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'''tokenizer_file''': '''tokenizer.json'''}
_UpperCamelCase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
__SCREAMING_SNAKE_CASE = None
def __init__(self , __a=None , __a=None , __a=None , __a="<unk>" , __a="<s>" , __a="</s>" , __a="<pad>" , __a=False , __a=False , **__a , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
__A , __A , tokenizer_file=__A , unk_token=__A , bos_token=__A , eos_token=__A , pad_token=__A , add_prefix_space=__A , clean_up_tokenization_spaces=__A , **__A , )
UpperCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __A ) != add_prefix_space:
UpperCAmelCase__ = getattr(__A , pre_tok_state.pop('type' ) )
UpperCAmelCase__ = add_prefix_space
UpperCAmelCase__ = pre_tok_class(**__A )
UpperCAmelCase__ = add_prefix_space
def UpperCamelCase__ (self , *__a , **__a ) -> BatchEncoding:
"""simple docstring"""
UpperCAmelCase__ = kwargs.get('is_split_into_words' , __A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
' pretokenized inputs.' )
return super()._batch_encode_plus(*__A , **__A )
def UpperCamelCase__ (self , *__a , **__a ) -> BatchEncoding:
"""simple docstring"""
UpperCAmelCase__ = kwargs.get('is_split_into_words' , __A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
' pretokenized inputs.' )
return super()._encode_plus(*__A , **__A )
def UpperCamelCase__ (self , __a , __a = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase__ = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def UpperCamelCase__ (self , __a ) -> List[int]:
"""simple docstring"""
UpperCAmelCase__ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__A , add_special_tokens=__A ) + [self.eos_token_id] )
if len(__A ) > self.model_max_length:
UpperCAmelCase__ = input_ids[-self.model_max_length :]
return input_ids
| 146 |
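# --- editor's sketch: the conversation left-truncation at the end of the tokenizer above ---
# Keeps only the most recent model_max_length tokens so the latest turns survive;
# the helper name and the toy ids are assumptions for illustration.
def build_conversation_ids(turns, eos_token_id, model_max_length):
    input_ids = []
    for ids in turns:
        input_ids.extend(ids + [eos_token_id])  # every turn is closed with EOS
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]  # drop the oldest tokens first
    return input_ids

print(build_conversation_ids([[1, 2], [3, 4, 5]], eos_token_id=0, model_max_length=4))  # [3, 4, 5, 0]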
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
snake_case = False
snake_case = False
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Namespace ) -> Tuple:
return TrainCommand(snake_case__ )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def __UpperCAmelCase ( __A : ArgumentParser ) -> List[Any]:
_lowercase = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' ,type=__A ,required=__A ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
train_parser.add_argument(
'--column_label' ,type=__A ,default=0 ,help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' ,type=__A ,default=1 ,help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' ,type=__A ,default=2 ,help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' ,type=__A ,default='' ,help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' ,type=__A ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
train_parser.add_argument('--output' ,type=__A ,default='./' ,help='path to saved the trained model.' )
train_parser.add_argument(
'--task' ,type=__A ,default='text_classification' ,help='Task to train the model on.' )
train_parser.add_argument(
'--model' ,type=__A ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' ,type=__A ,default=32 ,help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' ,type=__A ,default=64 ,help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' ,type=__A ,default=3e-5 ,help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' ,type=__A ,default=1e-08 ,help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self : Optional[Any] ,__A : Namespace ) -> Tuple:
_lowercase = logging.get_logger('transformers-cli/training' )
_lowercase = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output ,exist_ok=__A )
_lowercase = args.output
_lowercase = args.column_label
_lowercase = args.column_text
_lowercase = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
_lowercase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
_lowercase = Processor.create_from_csv(
args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
_lowercase = Processor.create_from_csv(
args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = args.validation_split
_lowercase = args.train_batch_size
_lowercase = args.valid_batch_size
_lowercase = args.learning_rate
_lowercase = args.adam_epsilon
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
raise NotImplementedError
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 67 | 0 |
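# --- editor's sketch: the argparse subcommand dispatch used by TrainCommand above ---
# set_defaults(func=...) stores a factory on the parsed namespace so the top-level
# CLI can dispatch without knowing its subcommands; names below are illustrative,
# not the real transformers-cli surface.
from argparse import ArgumentParser, Namespace

class TrainCommandSketch:
    @staticmethod
    def register_subcommand(commands):
        sub = commands.add_parser("train", help="Train a model on a task.")
        sub.add_argument("--train_data", type=str, required=True)
        sub.set_defaults(func=TrainCommandSketch)

    def __init__(self, args: Namespace):
        self.args = args

    def run(self):
        print(f"training on {self.args.train_data}")

if __name__ == "__main__":
    parser = ArgumentParser("cli")
    commands = parser.add_subparsers()
    TrainCommandSketch.register_subcommand(commands)
    args = parser.parse_args(["train", "--train_data", "data.csv"])
    args.func(args).run()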
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str:
lowercase__: List[str] = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
lowercase__: Optional[int] = 1_0_2_4
lowercase__: Optional[int] = 4_0_9_6
lowercase__: Union[str, Any] = 2_4
lowercase__: Optional[Any] = 1_6
lowercase__: str = [5, 1_1, 1_7, 2_3]
lowercase__: Union[str, Any] = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
lowercase__: List[Any] = (1, 3_8_4, 3_8_4)
if "nyu" or "midas" in checkpoint_url:
lowercase__: Any = 7_6_8
lowercase__: Union[str, Any] = [1, 1, 1, 0.5]
lowercase__: List[str] = [2_5_6, 5_1_2, 7_6_8, 7_6_8]
lowercase__: Union[str, Any] = 1_5_0
lowercase__: Tuple = 1_6
lowercase__: List[str] = (1, 3_8_4, 3_8_4)
lowercase__: Any = False
lowercase__: Optional[Any] = '''project'''
if "ade" in checkpoint_url:
lowercase__: Union[str, Any] = True
lowercase__: List[Any] = 7_6_8
lowercase__: Optional[Any] = [1, 1, 1, 0.5]
lowercase__: Dict = 1_5_0
lowercase__: Optional[int] = 1_6
lowercase__: Optional[int] = '''huggingface/label-files'''
lowercase__: Optional[int] = '''ade20k-id2label.json'''
lowercase__: Union[str, Any] = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='''dataset''' ) ) , '''r''' ) )
lowercase__: List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
lowercase__: int = idalabel
lowercase__: Union[str, Any] = {v: k for k, v in idalabel.items()}
lowercase__: Optional[Any] = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str:
lowercase__: Any = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__: List[str] = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
lowercase__: Dict = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
lowercase__: int = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
lowercase__: str = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
lowercase__: Tuple = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
lowercase__: Union[str, Any] = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
lowercase__: Dict = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
lowercase__: int = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase__: Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
lowercase__: Tuple = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
lowercase__: int = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
lowercase__: Union[str, Any] = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
lowercase__: Tuple = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
lowercase__: Any = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
lowercase__: Union[str, Any] = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
lowercase__: Dict = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
lowercase__: int = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
lowercase__: Dict = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__: Union[str, Any] = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
lowercase__: Union[str, Any] = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
lowercase__: Tuple = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
lowercase__: Any = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
lowercase__: Tuple = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
lowercase__: int = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__: Optional[Any] = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__: Any = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__: List[str] = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__: Optional[int] = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__: Dict = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
lowercase__: str = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
lowercase__: Any = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
lowercase__: Any = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
lowercase__: int = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
lowercase__: Optional[Any] = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
lowercase__: Optional[Any] = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
lowercase__: Union[str, Any] = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
lowercase__: Optional[Any] = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
lowercase__: Union[str, Any] = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
lowercase__: str = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
lowercase__: List[Any] = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
lowercase__: Tuple = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
lowercase__: Dict = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
lowercase__: Any = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
lowercase__: List[Any] = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
lowercase__: int = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
lowercase__: Dict = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
lowercase__: Union[str, Any] = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
lowercase__: Optional[int] = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
lowercase__: Tuple = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__: str = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
lowercase__: Optional[int] = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__: int = in_proj_weight[: config.hidden_size, :]
lowercase__: Optional[Any] = in_proj_bias[: config.hidden_size]
lowercase__: Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__: Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__: Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__: Optional[int] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
lowercase__: Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__: int = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowercase__, lowercase__: int = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowercase__: Optional[int] = torch.load(snake_case__ , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
lowercase__: List[Any] = state_dict.pop(snake_case__ )
lowercase__: Any = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
lowercase__: Dict = DPTForSemanticSegmentation(snake_case__ ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
lowercase__: int = 4_8_0 if '''ade''' in checkpoint_url else 3_8_4
lowercase__: List[str] = DPTImageProcessor(size=snake_case__ )
lowercase__: Optional[Any] = prepare_img()
lowercase__: Tuple = image_processor(snake_case__ , return_tensors='''pt''' )
# forward pass
lowercase__: Tuple = model(**snake_case__ ).logits if '''ade''' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
lowercase__: Optional[Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_5_5 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
__A = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 586 |
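# --- editor's sketch: splitting a fused qkv projection, as read_in_q_k_v does above ---
# The fused matrix stacks query, key and value row-wise; torch.split recovers them.
# hidden_size here is a toy value chosen for illustration.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # rows stacked as [q; k; v]
in_proj_bias = torch.randn(3 * hidden_size)

q_w, k_w, v_w = in_proj_weight.split(hidden_size, dim=0)
q_b, k_b, v_b = in_proj_bias.split(hidden_size, dim=0)

# the split chunks match the manual slicing used in the conversion script
assert torch.equal(q_w, in_proj_weight[:hidden_size, :])
assert torch.equal(k_b, in_proj_bias[hidden_size : 2 * hidden_size])
assert torch.equal(v_w, in_proj_weight[-hidden_size:, :])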
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> str:
_lowercase = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_lowercase = 1024
_lowercase = 4096
_lowercase = 24
_lowercase = 16
_lowercase = [5, 11, 17, 23]
_lowercase = [256, 512, 1024, 1024]
_lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = [256, 512, 768, 768]
_lowercase = 150
_lowercase = 16
_lowercase = (1, 384, 384)
_lowercase = False
_lowercase = 'project'
if "ade" in checkpoint_url:
_lowercase = True
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = 150
_lowercase = 16
_lowercase = 'huggingface/label-files'
_lowercase = 'ade20k-id2label.json'
_lowercase = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
_lowercase = {int(snake_case__ ): v for k, v in idalabel.items()}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
_lowercase = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
_lowercase = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowercase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowercase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowercase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_lowercase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowercase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowercase = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowercase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_lowercase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_lowercase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowercase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowercase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowercase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowercase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowercase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowercase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowercase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :int ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[: config.hidden_size, :]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
_lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str] ) -> str:
_lowercase , _lowercase = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowercase = torch.load(snake_case__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
_lowercase = state_dict.pop(snake_case__ )
_lowercase = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
_lowercase = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
_lowercase = 480 if 'ade' in checkpoint_url else 384
_lowercase = DPTImageProcessor(size=snake_case__ )
_lowercase = prepare_img()
_lowercase = image_processor(snake_case__ , return_tensors='pt' )
# forward pass
_lowercase = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
_lowercase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 67 | 0 |
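# --- editor's sketch: the refinenet index remap (4->0, 3->1, 2->2, 1->3) used by both scripts above ---
# The original checkpoint numbers its refinenets top-down while the HF fusion stage counts
# bottom-up; abs(layer_idx - 4) converts between the two. The sample key is illustrative.
import re

def remap_refinenet(name: str) -> str:
    match = re.search(r"refinenet(\d+)", name)
    if match is None:
        return name
    layer_idx = int(match.group(1))
    return name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx - 4)}")

print(remap_refinenet("neck.refinenet4.out_conv.weight"))
# -> neck.fusion_stage.layers.0.out_conv.weight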
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( lowercase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = CanineTokenizer
SCREAMING_SNAKE_CASE__ = False
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
UpperCAmelCase_ = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __A ( self : List[str] ):
'''simple docstring'''
return CanineTokenizer.from_pretrained("google/canine-s" )
def __A ( self : Any , **lowerCAmelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname , **__A )
UpperCAmelCase_ = 1_024
return tokenizer
@require_torch
def __A ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ = self.canine_tokenizer
UpperCAmelCase_ = ["Life is like a box of chocolates.", "You never know what you\'re gonna get."]
# fmt: off
UpperCAmelCase_ = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0]
# fmt: on
UpperCAmelCase_ = tokenizer(__A , padding=__A , return_tensors="pt" )
self.assertIsInstance(__A , __A )
UpperCAmelCase_ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__A , __A )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def __A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.canine_tokenizer
UpperCAmelCase_ = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase_ = tokenizer(__A , padding=__A , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __A )
self.assertIn("attention_mask" , __A )
self.assertIn("token_type_ids" , __A )
@require_torch
def __A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ = self.canine_tokenizer
UpperCAmelCase_ = [
"What\'s the weater?",
"It\'s about 25 degrees.",
]
UpperCAmelCase_ = tokenizer(
text_target=__A , max_length=32 , padding="max_length" , truncation=__A , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = " He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ = tokenizer.encode(__A , add_special_tokens=__A )
tokenizer.save_pretrained(__A )
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(__A )
UpperCAmelCase_ = after_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
shutil.rmtree(__A )
UpperCAmelCase_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = " He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ = chr(0XE007 )
additional_special_tokens.append(__A )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ = tokenizer.encode(__A , add_special_tokens=__A )
tokenizer.save_pretrained(__A )
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(__A )
UpperCAmelCase_ = after_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
self.assertIn(__A , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(__A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__A )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.get_tokenizers(do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
UpperCAmelCase_ , UpperCAmelCase_ = self.get_clean_sequence(__A )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ = 0XE005
UpperCAmelCase_ = chr(__A )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ = tokenizer.encode(__A , add_special_tokens=__A )
self.assertEqual(len(__A ) , 1 )
UpperCAmelCase_ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__A )
UpperCAmelCase_ = tokenizer.encode(__A , add_special_tokens=__A )
UpperCAmelCase_ = tokenizer.encode(__A , add_special_tokens=__A )
UpperCAmelCase_ = tokenizer.encode(__A , add_special_tokens=__A )
self.assertEqual(__A , input_encoded + special_token_id )
UpperCAmelCase_ = tokenizer.decode(__A , skip_special_tokens=__A )
self.assertTrue(special_token not in decoded )
def __A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ = self.get_tokenizers(do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
UpperCAmelCase_ = chr(0XE005 )
UpperCAmelCase_ = chr(0XE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__A )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ = tokenizer.tokenize(__A )
UpperCAmelCase_ = tokenizer.tokenize(__A )
self.assertEqual(len(__A ) , 1 )
self.assertEqual(len(__A ) , 1 )
self.assertEqual(token_a[0] , __A )
self.assertEqual(token_a[0] , __A )
@require_tokenizers
def __A ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = self.get_tokenizers(do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ = 0XE006
UpperCAmelCase_ = chr(__A )
UpperCAmelCase_ = AddedToken(__A , lstrip=__A )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__A )
tokenizer.from_pretrained(__A )
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__A )
with open(os.path.join(__A , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ = json.load(__A )
with open(os.path.join(__A , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ = json.load(__A )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ = 0XE006
UpperCAmelCase_ = chr(__A )
UpperCAmelCase_ = [new_token_a]
UpperCAmelCase_ = [new_token_a]
with open(os.path.join(__A , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__A , __A )
with open(os.path.join(__A , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__A , __A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ = tokenizer_class.from_pretrained(__A , extra_ids=0 )
self.assertIn(__A , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ = 0XE007
UpperCAmelCase_ = chr(__A )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ = [AddedToken(__A , lstrip=__A )]
UpperCAmelCase_ = tokenizer_class.from_pretrained(
__A , additional_special_tokens=__A , extra_ids=0 )
self.assertIn(__A , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __A ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = self.get_tokenizers(do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
UpperCAmelCase_ = "hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ = "[CLS] hello world [SEP]"
else:
UpperCAmelCase_ = input
UpperCAmelCase_ = tokenizer.encode(__A , add_special_tokens=__A )
UpperCAmelCase_ = tokenizer.decode(__A , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__A , [output, output.lower()] )
def __A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
UpperCAmelCase_ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ = "a"
UpperCAmelCase_ = ord(__A )
for attr in attributes_list:
setattr(__A , attr + "_id" , __A )
self.assertEqual(getattr(__A , __A ) , __A )
self.assertEqual(getattr(__A , attr + "_id" ) , __A )
setattr(__A , attr + "_id" , __A )
self.assertEqual(getattr(__A , __A ) , __A )
self.assertEqual(getattr(__A , attr + "_id" ) , __A )
setattr(__A , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__A , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__A , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ = 0XE006
UpperCAmelCase_ = chr(__A )
setattr(__A , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__A , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__A , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def __A ( self : Any ):
'''simple docstring'''
pass
def __A ( self : Any ):
'''simple docstring'''
pass
def __A ( self : List[str] ):
'''simple docstring'''
pass
def __A ( self : int ):
'''simple docstring'''
pass
def __A ( self : Tuple ):
'''simple docstring'''
pass
def __A ( self : str ):
'''simple docstring'''
pass
def __A ( self : Dict ):
'''simple docstring'''
pass
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass | 162 |
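# --- editor's sketch: CANINE's character-level "tokenization" exercised by the tests above ---
# The model works on raw Unicode code points, so encoding is just ord()/chr(); special tokens
# live in the private use area. The CLS/SEP values match the ids visible in the first test,
# but treat this as an illustration rather than the tokenizer's full behaviour.
CLS, SEP = 0xE000, 0xE001  # 57344 and 57345

def encode(text: str) -> list:
    return [CLS] + [ord(ch) for ch in text] + [SEP]

def decode(ids) -> str:
    return "".join(chr(i) for i in ids if i < 0xE000)  # drop private-use special tokens

ids = encode("hi")
print(ids)          # [57344, 104, 105, 57345]
print(decode(ids))  # hi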
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 0 |
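# --- editor's sketch: the optional-dependency guard pattern from the import file above ---
# In the real library the `except` branch swaps in dummy objects (stripped to `pass` in
# this dump) so imports always succeed and only fail when the class is actually used.
# Everything below is an illustrative reconstruction, not transformers' own code.
class OptionalDependencyNotAvailable(Exception):
    pass

def is_torch_available() -> bool:
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    class ViTMAEModel:  # placeholder that errors only on use
        def __init__(self, *args, **kwargs):
            raise ImportError("ViTMAEModel requires PyTorch to be installed.")
else:
    pass  # in the real package: from .modeling_vit_mae import ViTMAEModel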
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__lowerCAmelCase : Optional[Any] ={
'169M': 1_2,
'430M': 2_4,
'1B5': 2_4,
'3B': 3_2,
'7B': 3_2,
'14B': 4_0,
}
__lowerCAmelCase : Union[str, Any] ={
'169M': 7_6_8,
'430M': 1_0_2_4,
'1B5': 2_0_4_8,
'3B': 2_5_6_0,
'7B': 4_0_9_6,
'14B': 5_1_2_0,
}
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = list(state_dict.keys() )
for name in state_dict_keys:
__SCREAMING_SNAKE_CASE : str = state_dict.pop(snake_case__ )
# emb -> embedding
if name.startswith('''emb.''' ):
__SCREAMING_SNAKE_CASE : str = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
__SCREAMING_SNAKE_CASE : str = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ )
# ffn -> feed_forward
__SCREAMING_SNAKE_CASE : Dict = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
__SCREAMING_SNAKE_CASE : Any = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
    # time_mix_r -> time_mix_receptance and reshape
if name.endswith('''.time_mix_r''' ):
__SCREAMING_SNAKE_CASE : int = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
__SCREAMING_SNAKE_CASE : Optional[int] = '''rwkv.''' + name
__SCREAMING_SNAKE_CASE : Any = weight
return state_dict
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = 50277
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
__SCREAMING_SNAKE_CASE : Tuple = PreTrainedTokenizerFast(tokenizer_file=snake_case__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
# 2. Build the config
__SCREAMING_SNAKE_CASE : Tuple = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__SCREAMING_SNAKE_CASE : Optional[Any] = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''' )
__SCREAMING_SNAKE_CASE : str = RwkvConfig(
vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(snake_case__ )
# 3. Download model file then convert state_dict
__SCREAMING_SNAKE_CASE : Optional[Any] = hf_hub_download(snake_case__ , snake_case__ )
__SCREAMING_SNAKE_CASE : str = torch.load(snake_case__ , map_location='''cpu''' )
__SCREAMING_SNAKE_CASE : Optional[int] = convert_state_dict(snake_case__ )
# 4. Split in shards and save
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = shard_checkpoint(snake_case__ )
for shard_file, shard in shards.items():
torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
if index is not None:
__SCREAMING_SNAKE_CASE : List[Any] = os.path.join(snake_case__ , snake_case__ )
# Save the index as well
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
__SCREAMING_SNAKE_CASE : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n'''
f.write(snake_case__ )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        '''Cleaning up shards. This may raise an OOM error; if that is the case, don\'t worry, you still have converted the model.''' )
__SCREAMING_SNAKE_CASE : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__SCREAMING_SNAKE_CASE : Dict = torch.load(os.path.join(snake_case__ , snake_case__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained(snake_case__ )
model.push_to_hub(snake_case__ , max_shard_size='''2GB''' )
tokenizer.push_to_hub(snake_case__ )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
        help='Path to the tokenizer file to use (if not provided, the default GPT-NeoX-20B tokenizer is used).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 696 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 0 |
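A standalone sketch of the shard-and-save step used in the conversion function above, assuming a small dummy state dict; `shard_checkpoint` lives in `transformers.modeling_utils` (deprecated in recent releases), so treat this as illustrative rather than the canonical API:
import json
import os
import torch
from transformers.modeling_utils import shard_checkpoint

# A toy state dict standing in for the converted RWKV weights.
state_dict = {f"blocks.{i}.weight": torch.randn(256, 256) for i in range(8)}
shards, index = shard_checkpoint(state_dict, max_shard_size="1MB")
os.makedirs("sharded", exist_ok=True)
for shard_file, shard in shards.items():
    torch.save(shard, os.path.join("sharded", shard_file))
if index is not None:
    # The index maps each weight name to the shard file that contains it.
    with open(os.path.join("sharded", "pytorch_model.bin.index.json"), "w", encoding="utf-8") as f:
        f.write(json.dumps(index, indent=2, sort_keys=True) + "\n")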
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
A_: str = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class _lowercase ( PretrainedConfig ):
"""simple docstring"""
lowerCAmelCase__ = '''albert'''
def __init__( self , UpperCAmelCase=30000 , UpperCAmelCase=128 , UpperCAmelCase=4096 , UpperCAmelCase=12 , UpperCAmelCase=1 , UpperCAmelCase=64 , UpperCAmelCase=16384 , UpperCAmelCase=1 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0 , UpperCAmelCase=0 , UpperCAmelCase=512 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=0.1 , UpperCAmelCase="absolute" , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase=3 , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowercase = vocab_size
_lowercase = embedding_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_hidden_groups
_lowercase = num_attention_heads
_lowercase = inner_group_num
_lowercase = hidden_act
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = initializer_range
_lowercase = layer_norm_eps
_lowercase = classifier_dropout_prob
_lowercase = position_embedding_type
class _lowercase ( OnnxConfig ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowercase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowercase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 398 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = '''transfo-xl'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''mems''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[Any] ,__A : Union[str, Any]=26_7735 ,__A : List[Any]=[2_0000, 4_0000, 20_0000] ,__A : Dict=1024 ,__A : str=1024 ,__A : Dict=16 ,__A : int=64 ,__A : Dict=4096 ,__A : List[Any]=4 ,__A : Optional[int]=False ,__A : Union[str, Any]=18 ,__A : Tuple=1600 ,__A : str=1000 ,__A : Dict=True ,__A : Dict=True ,__A : int=0 ,__A : Optional[int]=-1 ,__A : int=True ,__A : List[str]=0.1 ,__A : Optional[int]=0.0 ,__A : str=True ,__A : Tuple="normal" ,__A : Union[str, Any]=0.01 ,__A : Tuple=0.01 ,__A : Any=0.02 ,__A : Union[str, Any]=1e-5 ,__A : List[Any]=0 ,**__A : str ,) -> List[Any]:
_lowercase = vocab_size
_lowercase = []
self.cutoffs.extend(__A )
if proj_share_all_but_first:
_lowercase = [False] + [True] * len(self.cutoffs )
else:
_lowercase = [False] + [False] * len(self.cutoffs )
_lowercase = d_model
_lowercase = d_embed
_lowercase = d_head
_lowercase = d_inner
_lowercase = div_val
_lowercase = pre_lnorm
_lowercase = n_layer
_lowercase = n_head
_lowercase = mem_len
_lowercase = same_length
_lowercase = attn_type
_lowercase = clamp_len
_lowercase = sample_softmax
_lowercase = adaptive
_lowercase = dropout
_lowercase = dropatt
_lowercase = untie_r
_lowercase = init
_lowercase = init_range
_lowercase = proj_init_std
_lowercase = init_std
_lowercase = layer_norm_epsilon
super().__init__(eos_token_id=__A ,**__A )
@property
    def max_position_embeddings( self ) -> int:
# Message copied from Transformer-XL documentation
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> None:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 67 | 0 |
from __future__ import annotations
import math
def is_prime( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums( n : int ) -> list[int]:
    # all left and right truncations of n, including n itself
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate( n : int ) -> bool:
    # cheap pre-filter: the leading and trailing three digits must be prime
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes( count : int = 11 ) -> list[int]:
    list_truncated_primes : list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
    print(F'{sum(compute_truncated_primes(11)) = }')
| 278 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class A_ ( PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''dpr'''
def __init__( self : int ,__A : Union[str, Any]=3_0522 ,__A : Optional[int]=768 ,__A : int=12 ,__A : List[Any]=12 ,__A : Optional[Any]=3072 ,__A : Union[str, Any]="gelu" ,__A : Union[str, Any]=0.1 ,__A : List[Any]=0.1 ,__A : str=512 ,__A : List[str]=2 ,__A : Tuple=0.02 ,__A : Tuple=1e-12 ,__A : List[Any]=0 ,__A : List[str]="absolute" ,__A : int = 0 ,**__A : int ,) -> Tuple:
super().__init__(pad_token_id=__A ,**__A )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = initializer_range
_lowercase = layer_norm_eps
_lowercase = projection_dim
_lowercase = position_embedding_type | 67 | 0 |
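A quick check of the truncatable-primes helpers from the first snippet in this row (Project Euler 37); the expected values below are the known two-sided truncatable primes:
primes = compute_truncated_primes(11)
print(primes[:4])   # [23, 37, 53, 73]
print(sum(primes))  # 748317, the Project Euler 37 answer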
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 73 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr ):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(result_pipe ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10 , 0 , -1 ) )
    print('Initial List' )
    print(*arr )
    arr = odd_even_transposition(arr )
    print('Sorted List\n' )
    print(*arr )
if __name__ == "__main__":
    main() | 67 | 0 |
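For contrast with the multiprocessing version above, a serial odd-even transposition sort is a minimal sketch of the same algorithm without processes or pipes:
def odd_even_transposition_serial(arr: list) -> list:
    # n phases guarantee the list is sorted, alternating even/odd index pairs.
    for phase in range(len(arr)):
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

print(odd_even_transposition_serial(list(range(10, 0, -1))))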
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
SCREAMING_SNAKE_CASE_ = get_logger()
SCREAMING_SNAKE_CASE_ = None
class _UpperCAmelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ) -> str:
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
F"Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` "
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
UpperCAmelCase = device if isinstance(__A , __A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"Device with string identifier {self.device} not listed among the available "
F"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
F"device: {str(jax.devices()[0] )}." )
UpperCAmelCase = str(jax.devices()[0] )
UpperCAmelCase = jnp_array_kwargs
@staticmethod
def a_ ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(__A ): device for device in jax.devices()}
def a_ ( self , lowercase_ ) -> Optional[int]:
import jax
import jax.numpy as jnp
if isinstance(__A , __A ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def a_ ( self , lowercase_ ) -> Dict:
import jax
import jax.numpy as jnp
if isinstance(__A , (str, bytes, type(__A )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCAmelCase = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                UpperCAmelCase = {'dtype': jnp.int64}
            else:
                UpperCAmelCase = {'dtype': jnp.int32}
        elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            UpperCAmelCase = {'dtype': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
UpperCAmelCase = np.asarray(__A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
def a_ ( self , lowercase_ ) -> List[str]:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , '__array__' ) and not isinstance(__A , jax.Array ):
UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def a_ ( self , lowercase_ ) -> Tuple:
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def a_ ( self , lowercase_ ) -> Mapping:
UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__A )
UpperCAmelCase = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def a_ ( self , lowercase_ ) -> "jax.Array":
UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__A )
UpperCAmelCase = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
UpperCAmelCase = self.recursive_tensorize(__A )
UpperCAmelCase = self._consolidate(__A )
return column
def a_ ( self , lowercase_ ) -> Mapping:
UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__A )
UpperCAmelCase = self.python_features_decoder.decode_batch(__A )
UpperCAmelCase = self.recursive_tensorize(__A )
for column_name in batch:
UpperCAmelCase = self._consolidate(batch[column_name] )
return batch
| 373 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''
def __init__( self : str ,__A : Union[str, Any]=5_0358 ,__A : Any=768 ,__A : List[str]=12 ,__A : Union[str, Any]=12 ,__A : int=3072 ,__A : Tuple="gelu_new" ,__A : Any=0.1 ,__A : Optional[Any]=0.1 ,__A : Tuple=4096 ,__A : int=2 ,__A : Union[str, Any]=0.02 ,__A : Optional[int]=1e-12 ,__A : List[str]=True ,__A : List[Any]=0 ,__A : Optional[Any]=1 ,__A : Optional[int]=2 ,__A : Optional[int]=66 ,__A : Tuple="block_sparse" ,__A : Optional[int]=True ,__A : Optional[int]=False ,__A : Tuple=64 ,__A : str=3 ,__A : Optional[int]=None ,**__A : Dict ,) -> Union[str, Any]:
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,sep_token_id=__A ,**__A ,)
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = use_cache
_lowercase = rescale_embeddings
_lowercase = attention_type
_lowercase = use_bias
_lowercase = block_size
_lowercase = num_random_blocks
_lowercase = classifier_dropout
class A_ ( OnnxConfig ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 67 | 0 |
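Hedged usage sketch of the JAX formatter earlier in this row, through the public `datasets` API (assumes `datasets` and `jax` are installed; the column values are illustrative):
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]})
ds = ds.with_format("jax")  # routes rows through the JAX tensor formatter shown above
print(type(ds[0]["x"]))     # a jax.Array placed on the default device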
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
"""simple docstring"""
__a ='''dpr'''
def __init__( self : int , __a : Union[str, Any]=3_05_22 , __a : Optional[int]=7_68 , __a : int=12 , __a : List[Any]=12 , __a : Optional[Any]=30_72 , __a : Union[str, Any]="gelu" , __a : Union[str, Any]=0.1 , __a : List[Any]=0.1 , __a : str=5_12 , __a : List[str]=2 , __a : Tuple=0.02 , __a : Tuple=1e-1_2 , __a : List[Any]=0 , __a : List[str]="absolute" , __a : int = 0 , **__a : int , ):
super().__init__(pad_token_id=__A , **__A )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = projection_dim
_a = position_embedding_type
| 692 |
def prefix_function( input_string : str ) -> list:
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix( input_str : str ) -> int:
    return max(prefix_function(input_str ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod() | 67 | 0 |
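A quick demonstration of the prefix (failure) function above, with the expected output worked out by hand:
print(prefix_function("aabcdaabc"))  # [0, 1, 0, 0, 0, 1, 2, 3, 4]
print(longest_prefix("aabcdaabc"))   # 4: "aabc" is both a proper prefix and a suffix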
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
'''simple docstring'''
def __init__( self , A , A=99 , A=13 , A=7 , A=9 , A=True , A=True , A=False , A=32 , A=5 , A=4 , A=37 , A=8 , A=0.1 , A=0.002 , A=1 , A=0 , A=0 , A=None , A=None , ) -> List[Any]:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = encoder_seq_length
_SCREAMING_SNAKE_CASE = decoder_seq_length
# For common tests
_SCREAMING_SNAKE_CASE = self.decoder_seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = d_ff
_SCREAMING_SNAKE_CASE = relative_attention_num_buckets
_SCREAMING_SNAKE_CASE = dropout_rate
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = eos_token_id
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = decoder_start_token_id
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = decoder_layers
def snake_case_( self ) -> Dict:
return TaConfig.from_pretrained("""google/umt5-base""" )
def snake_case_( self , A , A , A , A=None , A=None , A=None , A=None , A=None , ) -> Tuple:
if attention_mask is None:
_SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__A )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__A )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__A )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
_SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 )
_SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 )
_SCREAMING_SNAKE_CASE = self.get_config()
_SCREAMING_SNAKE_CASE = config.num_attention_heads
_SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(__A , __A , __A )
return config, input_dict
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case_( self ) -> Tuple:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def snake_case_( self ) -> Any:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def snake_case_( self , A , A , A , A , A , A , ) -> Tuple:
_SCREAMING_SNAKE_CASE = UMTaModel(config=__A )
model.to(__A )
model.eval()
_SCREAMING_SNAKE_CASE = model(
input_ids=__A , decoder_input_ids=__A , attention_mask=__A , decoder_attention_mask=__A , )
_SCREAMING_SNAKE_CASE = model(input_ids=__A , decoder_input_ids=__A )
_SCREAMING_SNAKE_CASE = result.last_hidden_state
_SCREAMING_SNAKE_CASE = result.past_key_values
_SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__A ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def snake_case_( self , A , A , A , A , A , A , ) -> List[str]:
_SCREAMING_SNAKE_CASE = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
# first forward pass
_SCREAMING_SNAKE_CASE = model(__A , use_cache=__A )
_SCREAMING_SNAKE_CASE = model(__A )
_SCREAMING_SNAKE_CASE = model(__A , use_cache=__A )
self.parent.assertTrue(len(__A ) == len(__A ) )
self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
_SCREAMING_SNAKE_CASE = model(__A )["""last_hidden_state"""]
_SCREAMING_SNAKE_CASE = model(__A , past_key_values=__A )["""last_hidden_state"""]
# select random slice
_SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach()
_SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-3 ) )
def snake_case_( self , A , A , ) -> int:
_SCREAMING_SNAKE_CASE = UMTaModel(config=__A ).to(__A ).half().eval()
_SCREAMING_SNAKE_CASE = model(**__A )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class a_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCamelCase = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCamelCase = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCamelCase = [0.8, 0.9]
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__A , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'{tmpdirname}/t5_test.onnx' , export_params=__A , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__A )
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE = config_and_inputs[0]
_SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(__A ).eval()
model.to(__A )
_SCREAMING_SNAKE_CASE = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__A ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__A ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__A ),
}
for attn_name, (name, mask) in zip(__A , head_masking.items() ):
_SCREAMING_SNAKE_CASE = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers , config.num_heads , device=__A )
_SCREAMING_SNAKE_CASE = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__A , return_dict_in_generate=__A , **__A , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def snake_case_( self ) -> List[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__A ).to(__A )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__A , legacy=__A )
_SCREAMING_SNAKE_CASE = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_SCREAMING_SNAKE_CASE = tokenizer(__A , return_tensors="""pt""" , padding=__A ).input_ids
# fmt: off
_SCREAMING_SNAKE_CASE = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__A , __A )
_SCREAMING_SNAKE_CASE = model.generate(input_ids.to(__A ) )
_SCREAMING_SNAKE_CASE = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__A )
self.assertEqual(__A , __A )
| 314 |
def find_min( arr : list ) -> int:
    n = len(arr )
    s = sum(arr )
    # dp[i][j] is True when some subset of the first i items sums to j
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff | 67 | 0 |
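A quick check of the minimum subset-sum-difference DP above:
print(find_min([1, 6, 11, 5]))       # 1, e.g. partition {1, 5, 6} vs {11} -> |12 - 11| = 1
print(find_min([3, 1, 4, 2, 2, 1]))  # 1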
import copy
import random
from transformers import CLIPTokenizer
class A__ ( CLIPTokenizer ):
def __init__( self : Optional[Any] , *_a : Any , **_a : Tuple ) -> Dict:
"""simple docstring"""
super().__init__(*__A , **__A )
_SCREAMING_SNAKE_CASE ={}
def __UpperCamelCase ( self : int , _a : int , *_a : List[Any] , **_a : Any ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =super().add_tokens(__A , *__A , **__A )
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
''' `placeholder_token` that is not already in the tokenizer.''' )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , *_a : Dict , _a : List[Any]=1 , **_a : str ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
if num_vec_per_token == 1:
self.try_adding_tokens(__A , *__A , **__A )
output.append(__A )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(__A ):
_SCREAMING_SNAKE_CASE =placeholder_token + f"_{i}"
self.try_adding_tokens(__A , *__A , **__A )
output.append(__A )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"The tokenizer already has placeholder token {token} that can get confused with"
f" {placeholder_token}keep placeholder tokens independent" )
_SCREAMING_SNAKE_CASE =output
def __UpperCamelCase ( self : str , _a : int , _a : Union[str, Any]=False , _a : Union[str, Any]=1.0 ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(__A , __A ):
_SCREAMING_SNAKE_CASE =[]
for i in range(len(__A ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__A ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
_SCREAMING_SNAKE_CASE =self.token_map[placeholder_token]
_SCREAMING_SNAKE_CASE =tokens[: 1 + int(len(__A ) * prop_tokens_to_load )]
if vector_shuffle:
_SCREAMING_SNAKE_CASE =copy.copy(__A )
random.shuffle(__A )
_SCREAMING_SNAKE_CASE =text.replace(__A , ''' '''.join(__A ) )
return text
def __call__( self : Union[str, Any] , _a : Optional[int] , *_a : Optional[Any] , _a : Optional[Any]=False , _a : int=1.0 , **_a : Dict ) -> Tuple:
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
__A , vector_shuffle=__A , prop_tokens_to_load=__A ) , *__A , **__A , )
def __UpperCamelCase ( self : int , _a : Dict , *_a : Optional[int] , _a : int=False , _a : str=1.0 , **_a : Optional[Any] ) -> List[str]:
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
__A , vector_shuffle=__A , prop_tokens_to_load=__A ) , *__A , **__A , ) | 691 |
from manim import *
class A_ ( Scene ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait() | 67 | 0 |
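The core trick in the multi-vector tokenizer earlier in this row is plain string expansion of a placeholder into its sub-tokens; a standalone sketch (token names are illustrative):
import random

token_map = {"<cat-toy>": ["<cat-toy>_0", "<cat-toy>_1", "<cat-toy>_2"]}
text = "a photo of <cat-toy> on the table"
vector_shuffle = True

for placeholder, tokens in token_map.items():
    if placeholder in text:
        tokens = list(tokens)
        if vector_shuffle:
            random.shuffle(tokens)  # randomize sub-token order, as the class above does
        text = text.replace(placeholder, " ".join(tokens))
print(text)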
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_chinese_clip'''] = ['''ChineseCLIPFeatureExtractor''']
    _import_structure['''image_processing_chinese_clip'''] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_chinese_clip'''] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 146 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester :
"""simple docstring"""
def __init__( self : Dict ,__A : Any ,__A : Tuple=None ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : int="resnet50" ,__A : int=3 ,__A : List[Any]=32 ,__A : Tuple=3 ,__A : List[Any]=True ,__A : Tuple=True ,) -> Any:
_lowercase = parent
_lowercase = out_indices if out_indices is not None else [4]
_lowercase = stage_names
_lowercase = out_features
_lowercase = backbone
_lowercase = batch_size
_lowercase = image_size
_lowercase = num_channels
_lowercase = use_pretrained_backbone
_lowercase = is_training
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Dict ) -> Union[str, Any]:
_lowercase = TimmBackbone(config=__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(__A )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase = config_and_inputs
_lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A_ ( ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A ) | 67 | 0 |
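Hedged sketch of loading a timm backbone through the Auto API, mirroring the equivalence test above (requires `timm` and network access; the checkpoint names are the ones used in the test):
from transformers import AutoBackbone

timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
assert timm_backbone.channels == hf_backbone.channels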
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__A = False
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
        lowercase__: Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
lowercase__: Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowercase__: Optional[int] = torch.manual_seed(0 )
lowercase__: Tuple = pipe.dual_guided(
prompt='''first prompt''' , image=__A , text_to_image_strength=0.75 , generator=__A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__A )
lowercase__: Optional[Any] = VersatileDiffusionPipeline.from_pretrained(__A , torch_dtype=torch.floataa )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
lowercase__: List[str] = generator.manual_seed(0 )
lowercase__: Tuple = pipe.dual_guided(
prompt='''first prompt''' , image=__A , text_to_image_strength=0.75 , generator=__A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _snake_case ( self ):
lowercase__: Optional[int] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
lowercase__: Dict = '''cyberpunk 2077'''
lowercase__: Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowercase__: str = torch.manual_seed(0 )
lowercase__: Dict = pipe.dual_guided(
prompt=__A , image=__A , text_to_image_strength=0.75 , generator=__A , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
lowercase__: Optional[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__: int = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase__: List[Any] = '''A painting of a squirrel eating a burger '''
lowercase__: Dict = torch.manual_seed(0 )
lowercase__: List[str] = pipe.text_to_image(
prompt=__A , generator=__A , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowercase__: List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__: str = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase__: Dict = pipe.image_variation(__A , generator=__A , output_type='''numpy''' ).images
lowercase__: int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__: List[Any] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 586 |
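# The save/reload round-trip test above leans on a basic torch property:
# reseeding a generator reproduces the exact same random draws, so the
# reloaded pipeline can be compared output-for-output. The seeding idiom
# in isolation:
import torch

generator = torch.manual_seed(0)
a = torch.randn(2, generator=generator)
b = torch.randn(2, generator=torch.manual_seed(0))
assert torch.equal(a, b)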
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 0 |
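# A minimal stand-in for the _LazyModule pattern above, using PEP 562 module
# __getattr__ inside a package __init__.py (the submodule/symbol names here
# are hypothetical, and this is not the actual transformers implementation):
# heavy submodules are imported only when one of their symbols is accessed.
import importlib

_LAZY_STRUCTURE = {"modeling_example": ["ExampleModel"]}  # hypothetical names

def __getattr__(name):
    for submodule, symbols in _LAZY_STRUCTURE.items():
        if name in symbols:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")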
# Simplified DES (S-DES): an educational 8-bit block cipher with two Feistel rounds.
def apply_table(inp, table):
    # Permutation tables store 1-indexed positions into the input bit-string.
    res = ""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data):
    return data[1:] + data[0]
def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, s0, s1, key, message):
    # One Feistel round; p4_table is resolved as a module-level name at call time.
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves between the two rounds
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)
    # decryption: same structure, subkeys applied in reverse order
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT) | 162 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    # Project Euler 30: numbers that equal the sum of the fifth powers of
    # their digits; 10**6 is a safe upper bound since 7 * 9**5 < 10**6.
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
    print(solution()) | 67 | 0 |
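# Worked check for the search above: 4150 is one of the qualifying numbers,
# since 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150.
assert sum(int(d) ** 5 for d in "4150") == 4150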
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = CLIPConfig
SCREAMING_SNAKE_CASE__ : Dict = ['''CLIPEncoderLayer''']
def __init__( self :Optional[int] , lowerCAmelCase__ :CLIPConfig ) -> Tuple:
super().__init__(__A )
__SCREAMING_SNAKE_CASE : List[str] = CLIPVisionModelWithProjection(config.vision_config )
__SCREAMING_SNAKE_CASE : Dict = nn.Linear(config.vision_config.projection_dim , 1 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int]=0.5 , lowerCAmelCase__ :Union[str, Any]=0.5 ) -> Any:
__SCREAMING_SNAKE_CASE : Dict = self.vision_model(__A )[0]
__SCREAMING_SNAKE_CASE : Any = self.p_head(__A )
__SCREAMING_SNAKE_CASE : Optional[Any] = nsfw_detected.flatten()
__SCREAMING_SNAKE_CASE : Dict = nsfw_detected > p_threshold
__SCREAMING_SNAKE_CASE : Union[str, Any] = nsfw_detected.tolist()
if any(__A ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(__A ):
if nsfw_detected_:
__SCREAMING_SNAKE_CASE : Optional[int] = np.zeros(images[idx].shape )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.w_head(__A )
__SCREAMING_SNAKE_CASE : Tuple = watermark_detected.flatten()
__SCREAMING_SNAKE_CASE : Union[str, Any] = watermark_detected > w_threshold
__SCREAMING_SNAKE_CASE : List[str] = watermark_detected.tolist()
if any(__A ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(__A ):
if watermark_detected_:
__SCREAMING_SNAKE_CASE : int = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 696 |
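# The safety checker above reduces each image to a scalar head score, flattens
# the batch, and compares against a threshold. That flatten-and-threshold step
# in isolation (values are made up for illustration):
import torch

scores = torch.tensor([[0.2], [0.7], [0.4]])      # one projection score per image
flagged = (scores.flatten() > 0.5).tolist()
assert flagged == [False, True, False]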
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    # All left and right truncations of n, e.g. 3797 -> [3797, 797, 379, 97, 37, 7, 3]
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    # Cheap pre-filter: the leading and trailing three digits must themselves be prime.
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
    print(f"""{sum(compute_truncated_primes(11)) = }""") | 67 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 398 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
"""simple docstring"""
def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
_lowercase = parent
_lowercase = batch_size
_lowercase = encoder_seq_length
_lowercase = decoder_seq_length
# For common tests
_lowercase = self.decoder_seq_length
_lowercase = is_training
_lowercase = use_attention_mask
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = d_ff
_lowercase = relative_attention_num_buckets
_lowercase = dropout_rate
_lowercase = initializer_factor
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = decoder_start_token_id
_lowercase = None
_lowercase = decoder_layers
def __UpperCAmelCase ( self : Dict ) -> Dict:
return TaConfig.from_pretrained('google/umt5-base' )
def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
if attention_mask is None:
_lowercase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowercase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
if decoder_head_mask is None:
_lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
if cross_attn_head_mask is None:
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=__A )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
_lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
_lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowercase = input_ids.clamp(self.pad_token_id + 1 )
_lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowercase = self.get_config()
_lowercase = config.num_attention_heads
_lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
return config, input_dict
def __UpperCAmelCase ( self : Dict ) -> str:
_lowercase , _lowercase = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Dict ) -> Any:
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
_lowercase = UMTaModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(
input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
_lowercase = model(input_ids=__A ,decoder_input_ids=__A )
_lowercase = result.last_hidden_state
_lowercase = result.past_key_values
_lowercase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__A ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
_lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
# first forward pass
_lowercase = model(__A ,use_cache=__A )
_lowercase = model(__A )
_lowercase = model(__A ,use_cache=__A )
self.parent.assertTrue(len(__A ) == len(__A ) )
self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
_lowercase , _lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append to next input_ids and
_lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowercase = model(__A )['last_hidden_state']
_lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
# select random slice
_lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
_lowercase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
_lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
_lowercase = model(**__A )['last_hidden_state']
self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
_lowercase = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def __UpperCAmelCase ( self : int ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def __UpperCAmelCase ( self : List[Any] ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__A )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = config_and_inputs[0]
_lowercase = UMTaForConditionalGeneration(__A ).eval()
model.to(__A )
_lowercase = {
'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
}
for attn_name, (name, mask) in zip(__A ,head_masking.items() ):
_lowercase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=__A )
_lowercase = model.generate(
config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def __UpperCAmelCase ( self : str ) -> List[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def __UpperCAmelCase ( self : int ) -> List[str]:
_lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
_lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
_lowercase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
# fmt: off
_lowercase = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__A ,__A )
_lowercase = model.generate(input_ids.to(__A ) )
_lowercase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_lowercase = tokenizer.batch_decode(__A )
self.assertEqual(__A ,__A ) | 67 | 0 |
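# The head-masking check above asserts that zeroed masks drive the matching
# attention weights to zero. The masking mechanics in isolation (a simplified
# stand-in, not the model's actual forward): a per-head mask broadcast-
# multiplies the (batch, heads, query, key) attention maps.
import torch

attn = torch.rand(2, 4, 5, 5)
head_mask = torch.zeros(4)               # mask out every head
masked = attn * head_mask.view(1, -1, 1, 1)
assert sum(w.sum().item() for w in masked) == 0.0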
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __lowerCamelCase ( A__ : Tuple , A__ : Union[str, Any] , A__ : str , A__ : int ) -> Dict:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __lowerCamelCase ( A__ : Optional[Any] , A__ : List[str] , A__ : Any , A__ : int , A__ : List[str]=True ) -> int:
model.train()
lowerCamelCase_ : Optional[int] = model(snake_case__ )
lowerCamelCase_ : Union[str, Any] = F.mse_loss(snake_case__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case__ )
def __lowerCamelCase ( A__ : int , A__ : Dict=False ) -> Optional[int]:
set_seed(42 )
lowerCamelCase_ : List[str] = RegressionModel()
lowerCamelCase_ : List[str] = deepcopy(snake_case__ )
lowerCamelCase_ : int = RegressionDataset(length=80 )
lowerCamelCase_ : Any = DataLoader(snake_case__ , batch_size=16 )
model.to(accelerator.device )
if sched:
lowerCamelCase_ : Optional[Any] = AdamW(params=model.parameters() , lr=1e-3 )
lowerCamelCase_ : List[str] = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        lowerCamelCase_ : Optional[Any] = LambdaLR(snake_case__ , lr_lambda=lambda epoch : epoch**0.65 )
        lowerCamelCase_ : Tuple = LambdaLR(snake_case__ , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : int = accelerator.prepare(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
lowerCamelCase_, lowerCamelCase_ : str = accelerator.prepare(snake_case__ , snake_case__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __lowerCamelCase ( A__ : Any ) -> List[str]:
# Test when on a single CPU or GPU that the context manager does nothing
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = get_training_setup(snake_case__ )
# Use a single batch
lowerCamelCase_, lowerCamelCase_ : Optional[int] = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase_, lowerCamelCase_ : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase_ : Union[str, Any] = ddp_input[torch.randperm(len(snake_case__ ) )]
def __lowerCamelCase ( A__ : Any ) -> int:
# Test on distributed setup that context manager behaves properly
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Optional[Any] = get_training_setup(snake_case__ )
# Use a single batch
lowerCamelCase_, lowerCamelCase_ : Dict = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase_, lowerCamelCase_ : Tuple = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase_, lowerCamelCase_ : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase_ : Tuple = ddp_input[torch.randperm(len(snake_case__ ) )]
def __lowerCamelCase ( A__ : List[str]=False , A__ : Optional[Any]=False ) -> Any:
lowerCamelCase_ : str = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Tuple = get_training_setup(snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
lowerCamelCase_, lowerCamelCase_ : Dict = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase_, lowerCamelCase_ : str = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase_, lowerCamelCase_ : List[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase_ : Tuple = ddp_input[torch.randperm(len(snake_case__ ) )]
GradientState._reset_state()
def __lowerCamelCase ( A__ : Any=False , A__ : List[str]=False ) -> Tuple:
lowerCamelCase_ : Optional[Any] = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = get_training_setup(snake_case__ , snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
lowerCamelCase_, lowerCamelCase_ : Dict = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase_, lowerCamelCase_ : List[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase_, lowerCamelCase_ : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
lowerCamelCase_ : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case__ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def __lowerCamelCase ( ) -> int:
lowerCamelCase_ : Optional[int] = Accelerator()
lowerCamelCase_ : int = RegressionDataset(length=80 )
lowerCamelCase_ : int = DataLoader(snake_case__ , batch_size=16 )
lowerCamelCase_ : Optional[Any] = RegressionDataset(length=96 )
lowerCamelCase_ : int = DataLoader(snake_case__ , batch_size=16 )
lowerCamelCase_, lowerCamelCase_ : Optional[int] = accelerator.prepare(snake_case__ , snake_case__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if iteration < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if batch_num < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __lowerCamelCase ( ) -> Dict:
lowerCamelCase_ : List[Any] = Accelerator()
lowerCamelCase_ : List[str] = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(snake_case__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(snake_case__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(snake_case__ , snake_case__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case__ , snake_case__ )
def __lowerCamelCase ( A__ : Optional[int] ) -> Any:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 278 |
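# A minimal sketch of the accumulate() idiom exercised above (assumes the
# accelerate package is installed; the tiny linear model and synthetic data
# are placeholders, not part of the test suite):
import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = torch.utils.data.DataLoader(torch.randn(8, 1), batch_size=2)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
for batch in dataloader:
    with accelerator.accumulate(model):  # grads sync only on step boundaries
        loss = model(batch).pow(2).mean()
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()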
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=__A ,)
assert hasattr(self ,'env' )
def __UpperCAmelCase ( self : str ,__A : Tuple ) -> int:
# configuration for running training on smdistributed Model Parallel
_lowercase = {
'enabled': True,
'processes_per_host': 8,
}
_lowercase = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
_lowercase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
_lowercase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=__A ,instance_type=self.instance_type ,debugger_hook_config=__A ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} ,metric_definitions=self.env.metric_definitions ,distribution=__A ,py_version='py36' ,)
def __UpperCAmelCase ( self : List[Any] ,__A : Any ) -> Optional[Any]:
TrainingJobAnalytics(__A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ) -> Optional[Any]:
# create estimator
_lowercase = self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
_lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__A ) | 67 | 0 |
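# Offline sketch of the metric extraction above: TrainingJobAnalytics yields a
# dataframe of (metric_name, value) rows, and the test filters it per metric.
# The same filtering on a hand-built frame (values are made up):
import pandas as pd

df = pd.DataFrame({"metric_name": ["eval_accuracy", "eval_loss"], "value": [0.35, 1.1]})
eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])
assert eval_accuracy == [0.35]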
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _snake_case ( A__ , unittest.TestCase ):
_lowercase : Optional[int] = BertJapaneseTokenizer
_lowercase : Any = False
_lowercase : str = True
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
super().setUp()
SCREAMING_SNAKE_CASE = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
def SCREAMING_SNAKE_CASE__ ( self , a) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = 'こんにちは、世界。 \nこんばんは、世界。'
SCREAMING_SNAKE_CASE = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_input_output_texts(__A)
SCREAMING_SNAKE_CASE = tokenizer.encode(__A , add_special_tokens=__A)
SCREAMING_SNAKE_CASE = tokenizer.decode(__A , clean_up_tokenization_spaces=__A)
return text, ids
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self) -> str:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file)
SCREAMING_SNAKE_CASE = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。')
self.assertListEqual(__A , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab')
self.assertIsNotNone(__A)
SCREAMING_SNAKE_CASE = 'こんにちは、世界。\nこんばんは、世界。'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(__A)
self.assertListEqual(__A , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(__A , 'wb') as handle:
pickle.dump(__A , __A)
with open(__A , 'rb') as handle:
SCREAMING_SNAKE_CASE = pickle.load(__A)
SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(__A)
self.assertListEqual(__A , __A)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
try:
SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic='unidic_lite')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
try:
SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic='unidic')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = MecabTokenizer(do_lower_case=__A , mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
try:
SCREAMING_SNAKE_CASE = MecabTokenizer(
do_lower_case=__A , normalize_text=__A , mecab_option='-d /usr/local/lib/mecab/dic/jumandic')
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = MecabTokenizer(normalize_text=__A , mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi')
self.assertIsNotNone(__A)
SCREAMING_SNAKE_CASE = 'こんにちは、世界。\nこんばんは、世界。'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(__A)
self.assertListEqual(__A , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(__A , 'wb') as handle:
pickle.dump(__A , __A)
with open(__A , 'rb') as handle:
SCREAMING_SNAKE_CASE = pickle.load(__A)
SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(__A)
self.assertListEqual(__A , __A)
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国', '人', '参政', '権'])
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国人', '参政権'])
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国人参政権'])
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = SudachiTokenizer(do_lower_case=__A , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = SudachiTokenizer(normalize_text=__A , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = SudachiTokenizer(trim_whitespace=__A , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp')
self.assertIsNotNone(__A)
SCREAMING_SNAKE_CASE = 'こんにちは、世界。\nこんばんは、世界。'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(__A)
self.assertListEqual(__A , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(__A , 'wb') as handle:
pickle.dump(__A , __A)
with open(__A , 'rb') as handle:
SCREAMING_SNAKE_CASE = pickle.load(__A)
SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(__A)
self.assertListEqual(__A , __A)
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = JumanppTokenizer(do_lower_case=__A)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = JumanppTokenizer(normalize_text=__A)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = JumanppTokenizer(trim_whitespace=__A)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。') , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(__A):
SCREAMING_SNAKE_CASE = i
SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=__A , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('こんにちは') , ['こんにちは'])
self.assertListEqual(tokenizer.tokenize('こんばんは') , ['こん', '##ばんは'])
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは') , ['こん', '##ばんは', '[UNK]', 'こんにちは'])
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp')
SCREAMING_SNAKE_CASE = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。')
self.assertListEqual(__A , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'])
SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは')
self.assertListEqual(__A , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'])
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese')
SCREAMING_SNAKE_CASE = tokenizer.encode('ありがとう。' , add_special_tokens=__A)
SCREAMING_SNAKE_CASE = tokenizer.encode('どういたしまして。' , add_special_tokens=__A)
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__A)
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__A , __A)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _snake_case ( A__ , unittest.TestCase ):
_lowercase : Optional[int] = BertJapaneseTokenizer
_lowercase : Optional[Any] = False
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
super().setUp()
SCREAMING_SNAKE_CASE = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
def SCREAMING_SNAKE_CASE__ ( self , **a) -> int:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **__A)
def SCREAMING_SNAKE_CASE__ ( self , a) -> int:
SCREAMING_SNAKE_CASE = 'こんにちは、世界。 \nこんばんは、世界。'
SCREAMING_SNAKE_CASE = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character')
SCREAMING_SNAKE_CASE = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。')
self.assertListEqual(
__A , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(__A):
SCREAMING_SNAKE_CASE = i
SCREAMING_SNAKE_CASE = CharacterTokenizer(vocab=__A , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('こんにちは') , ['こ', 'ん', 'に', 'ち', 'は'])
self.assertListEqual(tokenizer.tokenize('こんにちほ') , ['こ', 'ん', 'に', 'ち', '[UNK]'])
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char')
SCREAMING_SNAKE_CASE = tokenizer.encode('ありがとう。' , add_special_tokens=__A)
SCREAMING_SNAKE_CASE = tokenizer.encode('どういたしまして。' , add_special_tokens=__A)
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__A)
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__A , __A)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = 'cl-tohoku/bert-base-japanese'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(__A)
self.assertIsInstance(__A , __A)
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING') as cm:
BertTokenizer.from_pretrained(__A)
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.'))
SCREAMING_SNAKE_CASE = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING') as cm:
BertJapaneseTokenizer.from_pretrained(__A)
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.'))
| 73 |
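# Typical end-user call for the tokenizer exercised above, using the same
# checkpoint name as the tests (requires the MeCab extras, e.g. fugashi and
# ipadic, for the default word tokenizer):
from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tokenizer.tokenize("こんにちは、世界。"))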
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
        encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
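# Added example (illustrative): `attribute_map` lets generic code read the
# standard config names even though this config stores them under
# Blenderbot-specific attributes:
#
#   config = BlenderbotSmallConfig(d_model=256, encoder_attention_heads=4)
#   assert config.hidden_size == 256          # resolved through attribute_map
#   assert config.num_attention_heads == 4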
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super().outputs
else:
_lowercase = super(__A ,self ).outputs
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
# Generate decoder inputs
_lowercase = seq_length if not self.use_past else 1
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
_lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowercase = dict(**__A ,**__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
_lowercase = common_inputs['decoder_input_ids'].shape[1]
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = decoder_seq_length + 3
_lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
_lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowercase , _lowercase = self.num_layers
_lowercase = min(__A ,__A )
_lowercase = max(__A ,__A ) - min_num_layers
_lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A ,__A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase , _lowercase = self.num_layers
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = common_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase = tokenizer.num_special_tokens_to_add(__A )
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
return common_inputs
def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
elif self.task == "causal-lm":
_lowercase = self._generate_dummy_inputs_for_causal_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
else:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
else:
_lowercase = super(__A ,self )._flatten_past_key_values_(
__A ,__A ,__A ,__A ) | 67 | 0 |
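# Added note (illustrative; class and helper names as defined above): with
# `use_past=True` the exported decoder consumes cached keys/values, so the
# dummy-input generator appends one (key, value) pair of zero tensors per
# layer and feeds only a single new decoder token per step:
#
#   onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm", use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
#   assert "past_key_values" in dummy and dummy["decoder_input_ids"].shape[1] == 1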
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean",
        initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, drop_connect_rate=0.2, **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0,
        initializer_range=0.02, **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
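# Added example (illustrative): composing a full AlignConfig from the two
# sub-configs defined above, then round-tripping through `to_dict`:
#
#   text_config = AlignTextConfig(vocab_size=1000, hidden_size=128)
#   vision_config = AlignVisionConfig(image_size=224)
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["text_config"]["vocab_size"] == 1000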
| 373 |
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 | 67 | 0 |
"""Project Euler 23 (https://projecteuler.net/problem=23): sum of all positive
integers that cannot be written as the sum of two abundant numbers. 28123 is a
known upper bound for such integers."""


def solution(limit: int = 28123) -> int:
    sum_divs = [1] * (limit + 1)  # proper-divisor sums; every n > 1 has divisor 1
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a) in abundants for a in abundants):
            res += n
    return res
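
# Added cross-check (illustrative): recompute aliquot sums by brute force on a
# small range to validate the sieve above; call `_cross_check()` after
# modifying `solution`.
def _aliquot_sum(n: int) -> int:
    return sum(d for d in range(1, n) if n % d == 0)


def _cross_check(limit: int = 200) -> None:
    abundants = [n for n in range(1, limit + 1) if _aliquot_sum(n) > n]
    assert abundants[0] == 12  # 12 is the smallest abundant number
    pair_sums = {a + b for a in abundants for b in abundants}
    assert 24 in pair_sums and 23 not in pair_sums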
if __name__ == "__main__":
    print(solution())
| 692 |
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every start index at which `pattern` occurs in `s`."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
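
# Added example: the scan above is O((n - m + 1) * m) for text length n and
# pattern length m, and it reports overlapping occurrences; it should agree
# with repeated `str.find` calls.
def _find_all_builtin(s: str, pattern: str) -> list:
    positions, start = [], s.find(pattern)
    while start != -1:
        positions.append(start)
        start = s.find(pattern, start + 1)
    return positions


assert naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC") == _find_all_builtin("ABAAABCDBBABCDDEBCABC", "ABC")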
if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
 | 67 | 0 |
"""A Binary Indexed (Fenwick) Tree supporting point updates and prefix sums in
O(log n)."""
from copy import deepcopy


class BinaryIndexedTree:
    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at `index` to `value`."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of the elements in the half-open range [0, right)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of the elements in the half-open range [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index i such that prefix(i + 1) <= value, or -1 if none."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
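
# Added usage example for the class above: a point update followed by prefix
# and range-sum queries.
def _demo() -> None:
    bit = BinaryIndexedTree(arr=[1, 2, 3, 4, 5])
    assert bit.prefix(3) == 1 + 2 + 3       # sum over indices [0, 3)
    bit.add(2, 10)                          # element at index 2 becomes 13
    assert bit.query(2, 5) == 13 + 4 + 5    # sum over indices [2, 5)
    assert bit.get_array() == [1, 2, 13, 4, 5]


_demo()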
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 314 |
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Rayleigh quotient (v* A v) / (v* v) for a Hermitian matrix A and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
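
# Added example: for a Hermitian matrix the Rayleigh quotient always lies
# between the smallest and largest eigenvalues, which numpy can confirm.
_a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
_v = np.array([[1], [2], [3]])
_low, _high = np.linalg.eigvalsh(_a)[[0, -1]]
assert _low <= rayleigh_quotient(_a, _v).item() <= _high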
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
 | 67 | 0 |
import re


def dna(dna: str) -> str:
    """Returns the complementary strand of a DNA sequence (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
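
# Added example: complementing is an involution -- applying it twice returns
# the original strand.
assert dna("ATCG") == "TAGC"
assert dna(dna("GCTA")) == "GCTA"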
if __name__ == "__main__":
    import doctest

    doctest.testmod()
 | 691 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple ,__A : Dict ,__A : List[Any]=7 ,__A : Dict=3 ,__A : Tuple=30 ,__A : Dict=400 ,__A : Any=True ,__A : List[Any]=None ,__A : Any=True ,__A : List[str]=[0.5, 0.5, 0.5] ,__A : Union[str, Any]=[0.5, 0.5, 0.5] ,__A : int=True ,__A : List[str]=1 / 255 ,__A : Union[str, Any]=True ,) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowercase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_lowercase = parent
_lowercase = batch_size
_lowercase = num_channels
_lowercase = min_resolution
_lowercase = max_resolution
_lowercase = do_resize
_lowercase = size
_lowercase = do_normalize
_lowercase = image_mean
_lowercase = image_std
_lowercase = do_rescale
_lowercase = rescale_factor
_lowercase = do_pad
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ,__A : List[str]=False ) -> Union[str, Any]:
if not batched:
_lowercase = image_inputs[0]
if isinstance(__A ,Image.Image ):
_lowercase , _lowercase = image.size
else:
_lowercase , _lowercase = image.shape[1], image.shape[2]
if w < h:
_lowercase = int(self.size['shortest_edge'] * h / w )
_lowercase = self.size['shortest_edge']
elif w > h:
_lowercase = self.size['shortest_edge']
_lowercase = int(self.size['shortest_edge'] * w / h )
else:
_lowercase = self.size['shortest_edge']
_lowercase = self.size['shortest_edge']
else:
_lowercase = []
for image in image_inputs:
_lowercase , _lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowercase = max(__A ,key=lambda __A : item[0] )[0]
_lowercase = max(__A ,key=lambda __A : item[1] )[1]
return expected_height, expected_width
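# Added note (illustrative): the helper above mirrors shortest-edge resizing.
# With shortest_edge=18, a 30x60 image resizes to 18x36:
#
#   h, w, short = 30, 60, 18
#   new_h, new_w = (short, int(short * w / h)) if h < w else (int(short * h / w), short)
#   assert (new_h, new_w) == (18, 36)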
@require_torch
@require_vision
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A ,'image_mean' ) )
self.assertTrue(hasattr(__A ,'image_std' ) )
self.assertTrue(hasattr(__A ,'do_normalize' ) )
self.assertTrue(hasattr(__A ,'do_resize' ) )
self.assertTrue(hasattr(__A ,'do_rescale' ) )
self.assertTrue(hasattr(__A ,'do_pad' ) )
self.assertTrue(hasattr(__A ,'size' ) )
def __UpperCAmelCase ( self : str ) -> List[str]:
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,__A )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
pass
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowercase = DetaImageProcessor(format='coco_panoptic' )
_lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify masks
_lowercase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
 | 67 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None


def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
def __init__(self ) -> Tuple:
"""simple docstring"""
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(__A )
def UpperCamelCase__ (cls , __a , **__a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = kwargs.pop('config' , __A )
UpperCAmelCase__ = kwargs.pop('trust_remote_code' , __A )
UpperCAmelCase__ = True
UpperCAmelCase__ , UpperCAmelCase__ = ImageProcessingMixin.get_image_processor_dict(__A , **__A )
UpperCAmelCase__ = config_dict.get('image_processor_type' , __A )
UpperCAmelCase__ = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
UpperCAmelCase__ = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
UpperCAmelCase__ = config_dict.pop('feature_extractor_type' , __A )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
UpperCAmelCase__ = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
UpperCAmelCase__ = config_dict['auto_map']['AutoFeatureExtractor']
UpperCAmelCase__ = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__A , __A ):
UpperCAmelCase__ = AutoConfig.from_pretrained(__A , **__A )
# It could be in `config.image_processor_type``
UpperCAmelCase__ = getattr(__A , 'image_processor_type' , __A )
if hasattr(__A , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
UpperCAmelCase__ = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
UpperCAmelCase__ = image_processor_class_from_name(__A )
UpperCAmelCase__ = image_processor_auto_map is not None
UpperCAmelCase__ = image_processor_class is not None or type(__A ) in IMAGE_PROCESSOR_MAPPING
UpperCAmelCase__ = resolve_trust_remote_code(
__A , __A , __A , __A )
if has_remote_code and trust_remote_code:
UpperCAmelCase__ = get_class_from_dynamic_module(
__A , __A , **__A )
UpperCAmelCase__ = kwargs.pop('code_revision' , __A )
if os.path.isdir(__A ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__A , **__A )
elif image_processor_class is not None:
return image_processor_class.from_dict(__A , **__A )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__A ) in IMAGE_PROCESSOR_MAPPING:
UpperCAmelCase__ = IMAGE_PROCESSOR_MAPPING[type(__A )]
return image_processor_class.from_dict(__A , **__A )
raise ValueError(
F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def UpperCamelCase__ (__a , __a ) -> Union[str, Any]:
"""simple docstring"""
IMAGE_PROCESSOR_MAPPING.register(__A , __A )
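# Added example (illustrative; `MyConfig` and `MyImageProcessor` are
# hypothetical placeholder classes):
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   # -> resolves to ConvNextImageProcessor via the mapping above
#   AutoImageProcessor.register(MyConfig, MyImageProcessor)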
| 146 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
USE_XLA = False
USE_AMP = False
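# Added example (illustrative) of the CLI invocation this command implements:
#
#   transformers-cli train --train_data train.csv --column_label 0 --column_text 1 \
#       --model bert-base-uncased --output ./trained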
def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
 | 67 | 0 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__A = logging.get_logger(__name__)
# General docstring
__A = "ResNetConfig"
# Base docstring
__A = "microsoft/resnet-50"
__A = [1, 2_0_4_8, 7, 7]
# Image classification docstring
__A = "microsoft/resnet-50"
__A = "tiger cat"
__A = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" ):
super().__init__()
lowercase__: Optional[int] = nn.Convad(
__A , __A , kernel_size=__A , stride=__A , padding=kernel_size // 2 , bias=__A )
lowercase__: str = nn.BatchNormad(__A )
lowercase__: str = ACTaFN[activation] if activation is not None else nn.Identity()
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Tuple = self.convolution(__A )
lowercase__: Any = self.normalization(__A )
lowercase__: List[str] = self.activation(__A )
return hidden_state
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__()
lowercase__: int = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
lowercase__: str = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
lowercase__: Optional[int] = config.num_channels
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: str = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
lowercase__: Optional[Any] = self.embedder(__A )
lowercase__: Any = self.pooler(__A )
return embedding
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 ):
super().__init__()
lowercase__: Any = nn.Convad(__A , __A , kernel_size=1 , stride=__A , bias=__A )
lowercase__: List[str] = nn.BatchNormad(__A )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Tuple = self.convolution(__A )
lowercase__: int = self.normalization(__A )
return hidden_state
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" ):
super().__init__()
lowercase__: int = in_channels != out_channels or stride != 1
lowercase__: str = (
ResNetShortCut(__A , __A , stride=__A ) if should_apply_shortcut else nn.Identity()
)
lowercase__: Optional[Any] = nn.Sequential(
ResNetConvLayer(__A , __A , stride=__A ) , ResNetConvLayer(__A , __A , activation=__A ) , )
lowercase__: Optional[int] = ACTaFN[activation]
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = hidden_state
lowercase__: List[str] = self.layer(__A )
lowercase__: Union[str, Any] = self.shortcut(__A )
hidden_state += residual
lowercase__: Optional[int] = self.activation(__A )
return hidden_state
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , _UpperCAmelCase = 4 ):
super().__init__()
lowercase__: Tuple = in_channels != out_channels or stride != 1
lowercase__: Union[str, Any] = out_channels // reduction
lowercase__: Any = (
ResNetShortCut(__A , __A , stride=__A ) if should_apply_shortcut else nn.Identity()
)
lowercase__: Dict = nn.Sequential(
ResNetConvLayer(__A , __A , kernel_size=1 ) , ResNetConvLayer(__A , __A , stride=__A ) , ResNetConvLayer(__A , __A , kernel_size=1 , activation=__A ) , )
lowercase__: Tuple = ACTaFN[activation]
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Optional[Any] = hidden_state
lowercase__: Optional[int] = self.layer(__A )
lowercase__: Dict = self.shortcut(__A )
hidden_state += residual
lowercase__: List[str] = self.activation(__A )
return hidden_state
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , ):
super().__init__()
lowercase__: Any = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
lowercase__: Dict = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__A , __A , stride=__A , activation=config.hidden_act ) , *[layer(__A , __A , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = input
for layer in self.layers:
lowercase__: Tuple = layer(__A )
return hidden_state
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__()
lowercase__: List[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase__: Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__A , config.depths[1:] ):
self.stages.append(ResNetStage(__A , __A , __A , depth=__A ) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ):
lowercase__: List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__: Any = hidden_states + (hidden_state,)
lowercase__: Dict = stage_module(__A )
if output_hidden_states:
lowercase__: List[str] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=__A , hidden_states=__A , )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ResNetConfig
_UpperCAmelCase :Any = '''resnet'''
_UpperCAmelCase :Tuple = '''pixel_values'''
_UpperCAmelCase :str = True
def _snake_case ( self , _UpperCAmelCase ):
if isinstance(__A , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(__A , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
if isinstance(__A , __A ):
lowercase__: List[str] = value
__A = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." ,_UpperCAmelCase ,)
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(__A )
lowercase__: str = config
lowercase__: Optional[int] = ResNetEmbeddings(__A )
lowercase__: Any = ResNetEncoder(__A )
lowercase__: str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ):
lowercase__: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__: Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: Optional[int] = self.embedder(__A )
lowercase__: Optional[Any] = self.encoder(
__A , output_hidden_states=__A , return_dict=__A )
lowercase__: str = encoder_outputs[0]
lowercase__: Any = self.pooler(__A )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__A , pooler_output=__A , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,_UpperCAmelCase ,)
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(__A )
lowercase__: Union[str, Any] = config.num_labels
lowercase__: List[Any] = ResNetModel(__A )
# classification head
lowercase__: int = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _snake_case ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
lowercase__: Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: Optional[int] = self.resnet(__A , output_hidden_states=__A , return_dict=__A )
lowercase__: int = outputs.pooler_output if return_dict else outputs[1]
lowercase__: int = self.classifier(__A )
lowercase__: Optional[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__: Union[str, Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__: Optional[Any] = '''single_label_classification'''
else:
lowercase__: Optional[Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowercase__: Optional[int] = MSELoss()
if self.num_labels == 1:
lowercase__: Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase__: str = loss_fct(__A , __A )
elif self.config.problem_type == "single_label_classification":
lowercase__: List[Any] = CrossEntropyLoss()
lowercase__: Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__: Dict = BCEWithLogitsLoss()
lowercase__: Dict = loss_fct(__A , __A )
if not return_dict:
lowercase__: Optional[int] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,_UpperCAmelCase ,)
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(__A )
super()._init_backbone(__A )
lowercase__: Tuple = [config.embedding_size] + config.hidden_sizes
lowercase__: Optional[Any] = ResNetEmbeddings(__A )
lowercase__: Dict = ResNetEncoder(__A )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__A )
@replace_return_docstrings(output_type=__A , config_class=_CONFIG_FOR_DOC )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ):
lowercase__: Any = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__: List[str] = self.embedder(__A )
lowercase__: List[Any] = self.encoder(__A , output_hidden_states=__A , return_dict=__A )
lowercase__: List[Any] = outputs.hidden_states
lowercase__: Tuple = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
lowercase__: str = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__A , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__A , )
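# Minimal usage sketch (illustrative; not part of the original file). It assumes
# network access to the public "microsoft/resnet-50" checkpoint on the Hub:
#
#     from PIL import Image
#     import requests, torch
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     with torch.no_grad():
#         logits = model(**processor(image, return_tensors="pt")).logits
#     print(model.config.id2label[int(logits.argmax(-1))])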
| 586 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> str:
_lowercase = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_lowercase = 1024
_lowercase = 4096
_lowercase = 24
_lowercase = 16
_lowercase = [5, 11, 17, 23]
_lowercase = [256, 512, 1024, 1024]
_lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = [256, 512, 768, 768]
_lowercase = 150
_lowercase = 16
_lowercase = (1, 384, 384)
_lowercase = False
_lowercase = 'project'
if "ade" in checkpoint_url:
_lowercase = True
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = 150
_lowercase = 16
_lowercase = 'huggingface/label-files'
_lowercase = 'ade20k-id2label.json'
_lowercase = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
_lowercase = {int(snake_case__ ): v for k, v in idalabel.items()}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
_lowercase = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
_lowercase = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowercase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowercase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowercase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_lowercase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowercase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowercase = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowercase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_lowercase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_lowercase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowercase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowercase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowercase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowercase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowercase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowercase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowercase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :int ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[: config.hidden_size, :]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
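# Standalone sketch of the fused-QKV split performed above (shapes assumed for
# illustration): timm stores query/key/value as a single (3 * hidden, hidden)
# matrix, which is sliced into equal thirds for q, k and v.
#
#     import torch
#     hidden = 4
#     qkv = torch.randn(3 * hidden, hidden)
#     q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
#     assert torch.equal(torch.cat([q, k, v]), qkv)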
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
_lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str] ) -> str:
_lowercase , _lowercase = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowercase = torch.load(snake_case__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
_lowercase = state_dict.pop(snake_case__ )
_lowercase = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
_lowercase = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
_lowercase = 480 if 'ade' in checkpoint_url else 384
_lowercase = DPTImageProcessor(size=snake_case__ )
_lowercase = prepare_img()
_lowercase = image_processor(snake_case__ , return_tensors='pt' )
# forward pass
_lowercase = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
_lowercase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
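# Example invocation (illustrative; the script name and local path are placeholders).
# Note that the hub download above is commented out, so torch.load() treats
# --checkpoint_url as a *local* file path -- download the .pt checkpoint first:
#
#     python convert_dpt_hybrid_to_pytorch.py \
#         --checkpoint_url ./dpt_large-midas-2f21e586.pt \
#         --pytorch_dump_folder_path ./dpt-large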
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 67 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_a: Dict = logging.get_logger(__name__)
class __UpperCamelCase ( lowercase ):
def __init__( self : str , *lowerCAmelCase : Dict , **lowerCAmelCase : List[Any] ):
'''simple docstring'''
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , __A , )
super().__init__(*__A , **__A ) | 162 |
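# The module below uses transformers' lazy-import pattern (illustrative summary):
# _import_structure maps submodule names to their exported symbols, and _LazyModule
# defers the heavy torch / TensorFlow imports until an attribute such as
# ViTMAEModel is first accessed.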
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : int ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = '''transfo-xl'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''mems''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any]=267_735 , lowerCAmelCase__ :List[Any]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :str=1_024 , lowerCAmelCase__ :Dict=16 , lowerCAmelCase__ :int=64 , lowerCAmelCase__ :Dict=4_096 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :Union[str, Any]=18 , lowerCAmelCase__ :Tuple=1_600 , lowerCAmelCase__ :str=1_000 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :Optional[int]=-1 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :Optional[int]=0.0 , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Tuple="normal" , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Any=0.02 , lowerCAmelCase__ :Union[str, Any]=1E-5 , lowerCAmelCase__ :List[Any]=0 , **lowerCAmelCase__ :str , ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : str = []
self.cutoffs.extend(__A )
if proj_share_all_but_first:
__SCREAMING_SNAKE_CASE : List[Any] = [False] + [True] * len(self.cutoffs )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [False] + [False] * len(self.cutoffs )
__SCREAMING_SNAKE_CASE : Dict = d_model
__SCREAMING_SNAKE_CASE : Tuple = d_embed
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_head
__SCREAMING_SNAKE_CASE : Optional[int] = d_inner
__SCREAMING_SNAKE_CASE : Dict = div_val
__SCREAMING_SNAKE_CASE : str = pre_lnorm
__SCREAMING_SNAKE_CASE : Any = n_layer
__SCREAMING_SNAKE_CASE : Dict = n_head
__SCREAMING_SNAKE_CASE : Tuple = mem_len
__SCREAMING_SNAKE_CASE : int = same_length
__SCREAMING_SNAKE_CASE : Dict = attn_type
__SCREAMING_SNAKE_CASE : Any = clamp_len
__SCREAMING_SNAKE_CASE : int = sample_softmax
__SCREAMING_SNAKE_CASE : List[str] = adaptive
__SCREAMING_SNAKE_CASE : Tuple = dropout
__SCREAMING_SNAKE_CASE : Dict = dropatt
__SCREAMING_SNAKE_CASE : Any = untie_r
__SCREAMING_SNAKE_CASE : List[str] = init
__SCREAMING_SNAKE_CASE : Dict = init_range
__SCREAMING_SNAKE_CASE : List[str] = proj_init_std
__SCREAMING_SNAKE_CASE : Any = init_std
__SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_epsilon
super().__init__(eos_token_id=__A , **__A )
@property
    def max_position_embeddings( self :Optional[Any] ) -> Optional[int]:
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self :Any , lowerCAmelCase__ :Dict ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 696 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 0 |
def __lowerCAmelCase ( _A ,_A ):
"""simple docstring"""
_lowercase = len(snake_case__ )
_lowercase = []
for i in range(len(snake_case__ ) - pat_len + 1 ):
_lowercase = True
for j in range(snake_case__ ):
if s[i + j] != pattern[j]:
_lowercase = False
break
if match_found:
position.append(snake_case__ )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
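# Additional hand-checked cases (illustrative): every valid start index is reported,
# including overlapping matches, e.g. naive_pattern_search('AAAA', 'AA') == [0, 1, 2].
# The scan is O(len(s) * len(pattern)) in the worst case, e.g. s = 'A' * n with
# pattern = 'A' * (m - 1) + 'B'.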
| 398 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = '''transfo-xl'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''mems''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[Any] ,__A : Union[str, Any]=26_7735 ,__A : List[Any]=[2_0000, 4_0000, 20_0000] ,__A : Dict=1024 ,__A : str=1024 ,__A : Dict=16 ,__A : int=64 ,__A : Dict=4096 ,__A : List[Any]=4 ,__A : Optional[int]=False ,__A : Union[str, Any]=18 ,__A : Tuple=1600 ,__A : str=1000 ,__A : Dict=True ,__A : Dict=True ,__A : int=0 ,__A : Optional[int]=-1 ,__A : int=True ,__A : List[str]=0.1 ,__A : Optional[int]=0.0 ,__A : str=True ,__A : Tuple="normal" ,__A : Union[str, Any]=0.01 ,__A : Tuple=0.01 ,__A : Any=0.02 ,__A : Union[str, Any]=1e-5 ,__A : List[Any]=0 ,**__A : str ,) -> List[Any]:
_lowercase = vocab_size
_lowercase = []
self.cutoffs.extend(__A )
if proj_share_all_but_first:
_lowercase = [False] + [True] * len(self.cutoffs )
else:
_lowercase = [False] + [False] * len(self.cutoffs )
_lowercase = d_model
_lowercase = d_embed
_lowercase = d_head
_lowercase = d_inner
_lowercase = div_val
_lowercase = pre_lnorm
_lowercase = n_layer
_lowercase = n_head
_lowercase = mem_len
_lowercase = same_length
_lowercase = attn_type
_lowercase = clamp_len
_lowercase = sample_softmax
_lowercase = adaptive
_lowercase = dropout
_lowercase = dropatt
_lowercase = untie_r
_lowercase = init
_lowercase = init_range
_lowercase = proj_init_std
_lowercase = init_std
_lowercase = layer_norm_epsilon
super().__init__(eos_token_id=__A ,**__A )
@property
    def max_position_embeddings ( self : str ) -> Optional[int]:
# Message copied from Transformer-XL documentation
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings ( self : Any ,__A : Dict ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 67 | 0 |
from math import isqrt
def __lowerCamelCase ( A__ : int ) -> bool:
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def __lowerCamelCase ( A__ : int = 10**6 ) -> int:
lowerCamelCase_ : List[str] = 0
lowerCamelCase_ : Optional[Any] = 1
lowerCamelCase_ : int = 7
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
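# Hand check (illustrative, using the original names referenced in main()): the
# consecutive-cube gaps (n + 1)**3 - n**3 = 3*n*n + 3*n + 1 below 100 are
# 7, 19, 37, 61 and 91; all but 91 (= 7 * 13) are prime, so solution(100) == 4.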
| 278 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
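# Note (illustrative): projection_dim=0 below leaves the encoders' pooled [CLS]
# representation untouched; any positive value adds a final linear projection of
# the pooled output down to that size.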
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''dpr'''
def __init__( self : int ,__A : Union[str, Any]=3_0522 ,__A : Optional[int]=768 ,__A : int=12 ,__A : List[Any]=12 ,__A : Optional[Any]=3072 ,__A : Union[str, Any]="gelu" ,__A : Union[str, Any]=0.1 ,__A : List[Any]=0.1 ,__A : str=512 ,__A : List[str]=2 ,__A : Tuple=0.02 ,__A : Tuple=1e-12 ,__A : List[Any]=0 ,__A : List[str]="absolute" ,__A : int = 0 ,**__A : int ,) -> Tuple:
super().__init__(pad_token_id=__A ,**__A )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = initializer_range
_lowercase = layer_norm_eps
_lowercase = projection_dim
_lowercase = position_embedding_type | 67 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : List[str] = logging.get_logger(__name__)
a_ : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _snake_case ( A__ ):
_lowercase : Optional[Any] = '''camembert'''
def __init__( self , a=3_0522 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=2 , a=0.02 , a=1E-12 , a=1 , a=0 , a=2 , a="absolute" , a=True , a=None , **a , ) -> Optional[int]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A)
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = classifier_dropout
class _snake_case ( A__ ):
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
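# Illustrative: the OnnxConfig above only declares which input axes are dynamic --
# (batch, choice, sequence) for multiple-choice heads, (batch, sequence) otherwise --
# so exported graphs accept variable batch sizes and sequence lengths.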
| 73 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case = Lock()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Tuple , snake_case__ :Any , snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :List[str] ) -> Optional[Any]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_lowercase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_lowercase = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_lowercase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_lowercase = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Dict:
_lowercase = []
_lowercase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
_lowercase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
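# How the schedule works (illustrative): in even-numbered phases, even-positioned
# workers compare-exchange with their right neighbour; in odd phases, odd-positioned
# workers do. n phases are guaranteed to sort n elements -- note that the worker loop
# above hardcodes 10 phases to match the 10-element list built in main() below.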
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
_lowercase = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*snake_case__ )
_lowercase = odd_even_transposition(snake_case__ )
print('Sorted List\n' )
print(*snake_case__ )
if __name__ == "__main__":
main() | 67 | 0 |
"""simple docstring"""
import d4rl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
SCREAMING_SNAKE_CASE_ = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = '''hopper-medium-v2'''
SCREAMING_SNAKE_CASE_ = gym.make(env_name)
SCREAMING_SNAKE_CASE_ = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
SCREAMING_SNAKE_CASE_ = env.reset()
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1000
SCREAMING_SNAKE_CASE_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
SCREAMING_SNAKE_CASE_ = pipeline(obs, planning_horizon=32)
# execute action in environment
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = env.step(denorm_actions)
SCREAMING_SNAKE_CASE_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
F' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
SCREAMING_SNAKE_CASE_ = next_observation
except KeyboardInterrupt:
pass
print(F'Total reward: {total_reward}')
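# Notes (illustrative): d4rl is imported above purely for its gym environment
# registrations (hence the noqa), and n_guide_steps=2 nudges each denoising step's
# sampled trajectories toward higher predicted value -- setting it to 0 disables
# value guidance entirely, as the config comment says.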
| 373 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''
def __init__( self : str ,__A : Union[str, Any]=5_0358 ,__A : Any=768 ,__A : List[str]=12 ,__A : Union[str, Any]=12 ,__A : int=3072 ,__A : Tuple="gelu_new" ,__A : Any=0.1 ,__A : Optional[Any]=0.1 ,__A : Tuple=4096 ,__A : int=2 ,__A : Union[str, Any]=0.02 ,__A : Optional[int]=1e-12 ,__A : List[str]=True ,__A : List[Any]=0 ,__A : Optional[Any]=1 ,__A : Optional[int]=2 ,__A : Optional[int]=66 ,__A : Tuple="block_sparse" ,__A : Optional[int]=True ,__A : Optional[int]=False ,__A : Tuple=64 ,__A : str=3 ,__A : Optional[int]=None ,**__A : Dict ,) -> Union[str, Any]:
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,sep_token_id=__A ,**__A ,)
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = use_cache
_lowercase = rescale_embeddings
_lowercase = attention_type
_lowercase = use_bias
_lowercase = block_size
_lowercase = num_random_blocks
_lowercase = classifier_dropout
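    # Illustrative: the defaults above select BigBird's block-sparse attention
    # (attention_type="block_sparse", block_size=64, num_random_blocks=3); passing
    # attention_type="original_full" at construction time falls back to standard
    # full quadratic attention.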
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 67 | 0 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowerCAmelCase_ : Tuple = parse(importlib.metadata.version('torch'))
def _lowerCamelCase ( lowercase : Union[str, Version] , lowercase : str , lowercase : str ) -> int:
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
_a = STR_OPERATION_TO_FUNC[operation]
if isinstance(snake_case__ , snake_case__ ):
_a = parse(importlib.metadata.version(snake_case__ ) )
return operation(snake_case__ , parse(snake_case__ ) )
def _lowerCamelCase ( lowercase : str , lowercase : str ) -> Dict:
return compare_versions(snake_case__ , snake_case__ , snake_case__ )
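# Illustrative usage (function names as defined upstream in accelerate):
#
#     compare_versions(parse("1.13.0"), ">=", "1.12")   # True
#     compare_versions(parse("1.13.0"), "<", "1.12")    # False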
| 692 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> list:
_lowercase = [0] * len(snake_case__ )
for i in range(1 , len(snake_case__ ) ):
# use last results for better performance - dynamic programming
_lowercase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_lowercase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_lowercase = j
return prefix_result
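# Worked example (illustrative): for "abcabcd" the prefix function is
# [0, 0, 0, 1, 2, 3, 0] -- entry i is the length of the longest proper prefix of
# input_string[: i + 1] that is also its suffix; the while-loop above backtracks
# through previously computed values instead of rescanning the string.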
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> int:
return max(prefix_function(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 | 0 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
lowercase_ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowercase_ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
lowercase_ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def snake_case_( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def snake_case_( self , A , A , A=None , A=1 , A="binary" , A=None , A="warn" , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = recall_score(
__A , __A , labels=__A , pos_label=__A , average=__A , sample_weight=__A , zero_division=__A , )
return {"recall": float(__A ) if score.size == 1 else score}
| 314 |
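# The function below solves the classic partition problem: split `arr` into two
# subsets whose sums differ as little as possible. Illustrative trace: for
# arr = [1, 6, 11, 5] the total is 23, the best reachable half-sum is 11
# (e.g. {6, 5}), and the returned difference is 23 - 2 * 11 = 1.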
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Union[str, Any]:
_lowercase = len(snake_case__ )
_lowercase = sum(snake_case__ )
_lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_lowercase = True
for i in range(1 , s + 1 ):
_lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
_lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_lowercase = s - 2 * j
break
return diff | 67 | 0 |
import os
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =os.path.dirname(os.path.realpath(snake_case__))
_SCREAMING_SNAKE_CASE =os.path.join(snake_case__ ,'''triangle.txt''')
with open(snake_case__) as f:
_SCREAMING_SNAKE_CASE =f.readlines()
_SCREAMING_SNAKE_CASE =[]
for line in triangle:
_SCREAMING_SNAKE_CASE =[]
for number in line.strip().split(''' '''):
numbers_from_line.append(int(snake_case__))
a.append(snake_case__)
for i in range(1 ,len(snake_case__)):
for j in range(len(a[i])):
_SCREAMING_SNAKE_CASE =a[i - 1][j] if j != len(a[i - 1]) else 0
_SCREAMING_SNAKE_CASE =a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(snake_case__ ,snake_case__)
return max(a[-1])
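# Illustrative: this is the classic maximum path sum (Project Euler 18/67) -- each
# cell accumulates the larger of its two parents top-down, so max(a[-1]) is the
# best root-to-leaf total of the triangle read from triangle.txt.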
if __name__ == "__main__":
print(solution()) | 691 |
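# The scene below animates accelerate-style layer offloading with Manim: model
# weights hop between CPU and GPU as an input square moves through the layer blocks.
# Illustrative render command (scene class name as defined below):
#
#     manim -pql this_file.py A_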
from manim import *
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait() | 67 | 0 |
_UpperCamelCase = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
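# Illustrative: this mirrors the version pins declared in diffusers' setup.py; the
# table is meant to be regenerated by release tooling rather than edited by hand,
# so version bumps should happen in setup.py first.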
| 146 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
"""simple docstring"""
def __init__( self : Dict ,__A : Any ,__A : Tuple=None ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : int="resnet50" ,__A : int=3 ,__A : List[Any]=32 ,__A : Tuple=3 ,__A : List[Any]=True ,__A : Tuple=True ,) -> Any:
_lowercase = parent
_lowercase = out_indices if out_indices is not None else [4]
_lowercase = stage_names
_lowercase = out_features
_lowercase = backbone
_lowercase = batch_size
_lowercase = image_size
_lowercase = num_channels
_lowercase = use_pretrained_backbone
_lowercase = is_training
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Dict ) -> Union[str, Any]:
_lowercase = TimmBackbone(config=__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(__A )
self.parent.assertEqual(
            result.feature_maps[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase = config_and_inputs
_lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
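# Illustrative: running these checks needs both torch and timm installed
# (e.g. `pip install torch timm`); the tester above wraps timm's resnet50 and
# exposes feature maps for the stages listed in out_indices.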
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A ) | 67 | 0 |
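# A minimal, self-contained sketch of the retain_grad idiom the gradient test
# above relies on, using a toy torch model (hypothetical; not part of the suite):
# non-leaf tensors only keep their .grad if retain_grad() was called first.
import torch
from torch import nn
_toy = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
_x = torch.randn(1, 4)
_hidden = _toy[0](_x)
_hidden.retain_grad()  # keep the gradient of this intermediate activation
_out = _toy[2](_toy[1](_hidden))
_out.flatten()[0].backward(retain_graph=True)
assert _hidden.grad is not None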
"""simple docstring"""
import argparse
__A = "docs/source/_static/js/custom.js"
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Union[str, Any]:
with open(snake_case__ , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase__: int = f.readlines()
lowercase__: Union[str, Any] = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
lowercase__: Optional[Any] = F"""const stableVersion = \"v{version}\"\n"""
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += F""" \"v{version}\": \"v{version}\",\n"""
with open(snake_case__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(snake_case__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
__A = parser.parse_args()
update_custom_js(args.version)
| 586 |
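# For reference, a hedged sketch of the custom.js layout the updater above
# assumes, reconstructed from the string markers it searches for (the version
# numbers here are placeholders, not real releases):
_EXAMPLE_CUSTOM_JS = """
const stableVersion = "v4.0.0"
const versionMapping = {
    "main": "main",
    "v4.0.0": "v4.0.0",
}
"""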
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 0 |
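# A minimal emulation of the lazy-import pattern above (illustrative only; the
# real _LazyModule lives in transformers.utils and carries more machinery):
# attribute access triggers the underlying submodule import on first use.
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)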
def __lowerCAmelCase ( A = 3 , A = 7 , A = 1000000 ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
for current_denominator in range(1 , limit + 1 ):
UpperCAmelCase_ = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
UpperCAmelCase_ = current_numerator
UpperCAmelCase_ = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000)) | 162 |
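# A hedged cross-check of the search above using the standard library, on a
# much smaller limit (brute force with exact fractions is slow at 10**6):
from fractions import Fraction
_best = Fraction(0, 1)
for _d in range(1, 1000 + 1):
    _n = _d * 3 // 7
    if _d % 7 == 0:  # skip 3/7 itself; we want the largest fraction strictly below it
        _n -= 1
    _best = max(_best, Fraction(_n, _d))
print(_best.numerator)  # numerator of the largest fraction < 3/7 with denominator <= 1000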
snake_case = {str(digit): digit**5 for digit in range(1_0)}
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(snake_case__ ) )
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(snake_case__ ) )
if __name__ == "__main__":
print(solution()) | 67 | 0 |
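# Quick sanity check of the digit-power idea above, using the classic
# fourth-power case: 1634 equals the sum of the fourth powers of its digits.
assert 1634 == sum(int(_digit) ** 4 for _digit in "1634")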
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = DanceDiffusionPipeline
SCREAMING_SNAKE_CASE__ : Union[str, Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
SCREAMING_SNAKE_CASE__ : Optional[int] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : Optional[int] = False
def __magic_name__( self :Tuple ) -> Tuple:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[str] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__A , use_timestep_embedding=__A , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
__SCREAMING_SNAKE_CASE : Any = IPNDMScheduler()
__SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def __magic_name__( self :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :int=0 ) -> Optional[Any]:
if str(__A ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : str = torch.manual_seed(__A )
else:
__SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=__A ).manual_seed(__A )
__SCREAMING_SNAKE_CASE : str = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Tuple = DanceDiffusionPipeline(**__A )
__SCREAMING_SNAKE_CASE : Dict = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(__A )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**__A )
__SCREAMING_SNAKE_CASE : Tuple = output.audios
__SCREAMING_SNAKE_CASE : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__SCREAMING_SNAKE_CASE : Optional[Any] = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __magic_name__( self :List[str] ) -> Union[str, Any]:
return super().test_save_load_local()
@skip_mps
def __magic_name__( self :Dict ) -> Any:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __magic_name__( self :Dict ) -> List[str]:
return super().test_save_load_optional_components()
@skip_mps
def __magic_name__( self :int ) -> Any:
return super().test_attention_slicing_forward_pass()
def __magic_name__( self :str ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Any ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :Any ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = torch_device
__SCREAMING_SNAKE_CASE : str = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
__SCREAMING_SNAKE_CASE : Dict = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Tuple = pipe(generator=__A , num_inference_steps=100 , audio_length_in_s=4.096 )
__SCREAMING_SNAKE_CASE : List[str] = output.audios
__SCREAMING_SNAKE_CASE : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__( self :Dict ) -> int:
__SCREAMING_SNAKE_CASE : Dict = torch_device
__SCREAMING_SNAKE_CASE : List[Any] = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Tuple = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = pipe(generator=__A , num_inference_steps=100 , audio_length_in_s=4.096 )
__SCREAMING_SNAKE_CASE : Optional[Any] = output.audios
__SCREAMING_SNAKE_CASE : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__SCREAMING_SNAKE_CASE : Tuple = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 696 |
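# A hedged usage sketch of the pipeline exercised above, following the slow
# tests (left commented out: it downloads the full harmonai/maestro-150k weights):
# import torch
# from diffusers import DanceDiffusionPipeline
# pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
# audios = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096).audios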
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> list[int]:
_lowercase = str(snake_case__ )
_lowercase = [n]
for i in range(1 , len(snake_case__ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
if len(str(snake_case__ ) ) > 3:
if not is_prime(int(str(snake_case__ )[-3:] ) ) or not is_prime(int(str(snake_case__ )[:3] ) ):
return False
return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 11 ) -> list[int]:
_lowercase = []
_lowercase = 13
while len(snake_case__ ) != count:
if validate(snake_case__ ):
_lowercase = list_truncated_nums(snake_case__ )
if all(is_prime(snake_case__ ) for i in list_nums ):
list_truncated_primes.append(snake_case__ )
num += 2
return list_truncated_primes
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""") | 67 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , UpperCAmelCase = 768 , ):
'''simple docstring'''
super().__init__()
_lowercase = nn.Parameter(torch.zeros(1 , __A ) )
_lowercase = nn.Parameter(torch.ones(1 , __A ) )
def _UpperCAmelCase ( self , UpperCAmelCase = None , UpperCAmelCase = None , ):
'''simple docstring'''
_lowercase = nn.Parameter(self.mean.to(__A ).to(__A ) )
_lowercase = nn.Parameter(self.std.to(__A ).to(__A ) )
return self
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
_lowercase = (embeds - self.mean) * 1.0 / self.std
return embeds
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
_lowercase = (embeds * self.std) + self.mean
return embeds
| 398 |
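# The two transforms above are exact inverses; a quick round-trip check with
# arbitrary (untrained) statistics:
import torch
_mean, _std = torch.tensor([1.0]), torch.tensor([2.0])
_emb = torch.tensor([5.0])
_scaled = (_emb - _mean) * 1.0 / _std
assert torch.allclose(_scaled * _std + _mean, _emb)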
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
"""simple docstring"""
def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
_lowercase = parent
_lowercase = batch_size
_lowercase = encoder_seq_length
_lowercase = decoder_seq_length
# For common tests
_lowercase = self.decoder_seq_length
_lowercase = is_training
_lowercase = use_attention_mask
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = d_ff
_lowercase = relative_attention_num_buckets
_lowercase = dropout_rate
_lowercase = initializer_factor
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = decoder_start_token_id
_lowercase = None
_lowercase = decoder_layers
def __UpperCAmelCase ( self : Dict ) -> Dict:
return TaConfig.from_pretrained('google/umt5-base' )
def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
if attention_mask is None:
_lowercase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowercase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
if decoder_head_mask is None:
_lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
if cross_attn_head_mask is None:
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=__A )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
_lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
_lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowercase = input_ids.clamp(self.pad_token_id + 1 )
_lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowercase = self.get_config()
_lowercase = config.num_attention_heads
_lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
return config, input_dict
def __UpperCAmelCase ( self : Dict ) -> str:
_lowercase , _lowercase = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Dict ) -> Any:
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
_lowercase = UMTaModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(
input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
_lowercase = model(input_ids=__A ,decoder_input_ids=__A )
_lowercase = result.last_hidden_state
_lowercase = result.past_key_values
_lowercase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__A ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
_lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
# first forward pass
_lowercase = model(__A ,use_cache=__A )
_lowercase = model(__A )
_lowercase = model(__A ,use_cache=__A )
self.parent.assertTrue(len(__A ) == len(__A ) )
self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
_lowercase , _lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append to next input_ids and
_lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowercase = model(__A )['last_hidden_state']
_lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
# select random slice
_lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
_lowercase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
_lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
_lowercase = model(**__A )['last_hidden_state']
self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
_lowercase = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def __UpperCAmelCase ( self : int ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def __UpperCAmelCase ( self : List[Any] ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__A )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = config_and_inputs[0]
_lowercase = UMTaForConditionalGeneration(__A ).eval()
model.to(__A )
_lowercase = {
'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
}
for attn_name, (name, mask) in zip(__A ,head_masking.items() ):
_lowercase = {name: mask}
# Explicitly pass decoder_head_mask as it is required by the T5 model when head_mask is specified
if name == "head_mask":
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=__A )
_lowercase = model.generate(
config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def __UpperCAmelCase ( self : str ) -> List[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def __UpperCAmelCase ( self : int ) -> List[str]:
_lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
_lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
_lowercase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
# fmt: off
_lowercase = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333, 6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__A ,__A )
_lowercase = model.generate(input_ids.to(__A ) )
_lowercase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_lowercase = tokenizer.batch_decode(__A )
self.assertEqual(__A ,__A ) | 67 | 0 |
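# The mask-building step used by the tester above, in miniature: non-pad
# positions become 1s, pad positions 0s.
import torch
_pad_token_id = 0
_input_ids = torch.tensor([[5, 6, 0, 0]])
_attention_mask = _input_ids.ne(_pad_token_id).long()
assert _attention_mask.tolist() == [[1, 1, 0, 0]]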
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE_ (nn.Module ):
'''simple docstring'''
def __init__( self : str , __a : int = 16 , __a : int = 88 , __a : Optional[int] = None , __a : int = 1 , __a : float = 0.0 , __a : int = 32 , __a : Optional[int] = None , __a : bool = False , __a : Optional[int] = None , __a : Optional[int] = None , __a : str = "geglu" , __a : Optional[int] = None , ) ->Optional[int]:
super().__init__()
lowerCamelCase_ : Tuple = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__A , attention_head_dim=__A , in_channels=__A , num_layers=__A , dropout=__A , norm_num_groups=__A , cross_attention_dim=__A , attention_bias=__A , sample_size=__A , num_vector_embeds=__A , activation_fn=__A , num_embeds_ada_norm=__A , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase_ : Optional[int] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase_ : Optional[int] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase_ : Any = [1, 0]
def _lowerCAmelCase ( self : Union[str, Any] , __a : Union[str, Any] , __a : Tuple , __a : Any=None , __a : Optional[int]=None , __a : Tuple=None , __a : bool = True , ) ->Optional[Any]:
lowerCamelCase_ : Union[str, Any] = hidden_states
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : Union[str, Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase_ : str = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase_ : List[Any] = self.transformer_index_for_condition[i]
lowerCamelCase_ : List[str] = self.transformers[transformer_index](
__A , encoder_hidden_states=__A , timestep=__A , cross_attention_kwargs=__A , return_dict=__A , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase_ : Dict = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase_ : Any = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__A )
| 278 |
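# The mixing step above is a plain convex combination of the two encoded
# states; in isolation with toy tensors:
import torch
_mix_ratio = 0.5
_e0, _e1 = torch.ones(2), torch.zeros(2)
_mixed = _e0 * _mix_ratio + _e1 * (1 - _mix_ratio)
assert torch.allclose(_mixed, torch.full((2,), 0.5))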
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=__A ,)
assert hasattr(self ,'env' )
def __UpperCAmelCase ( self : str ,__A : Tuple ) -> int:
# configuration for running training on smdistributed Model Parallel
_lowercase = {
'enabled': True,
'processes_per_host': 8,
}
_lowercase = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
_lowercase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
_lowercase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=__A ,instance_type=self.instance_type ,debugger_hook_config=__A ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} ,metric_definitions=self.env.metric_definitions ,distribution=__A ,py_version='py36' ,)
def __UpperCAmelCase ( self : List[Any] ,__A : Any ) -> Optional[Any]:
TrainingJobAnalytics(__A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ) -> Optional[Any]:
# create estimator
_lowercase = self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
_lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from the SageMaker job; this includes starting, preprocessing and stopping
_lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump test results into a json file to share in the PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__A ) | 67 | 0 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
a_ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _snake_case ( A__ ):
def __init__( self , a , a=768) -> Optional[int]:
super().__init__(__A)
SCREAMING_SNAKE_CASE = proj_size
SCREAMING_SNAKE_CASE = CLIPVisionModel(__A)
SCREAMING_SNAKE_CASE = PaintByExampleMapper(__A)
SCREAMING_SNAKE_CASE = nn.LayerNorm(config.hidden_size)
SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , self.proj_size)
# uncondition for scaling
SCREAMING_SNAKE_CASE = nn.Parameter(torch.randn((1, 1, self.proj_size)))
def SCREAMING_SNAKE_CASE__ ( self , a , a=False) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.model(pixel_values=__A)
SCREAMING_SNAKE_CASE = clip_output.pooler_output
SCREAMING_SNAKE_CASE = self.mapper(latent_states[:, None])
SCREAMING_SNAKE_CASE = self.final_layer_norm(__A)
SCREAMING_SNAKE_CASE = self.proj_out(__A)
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class _snake_case ( nn.Module ):
def __init__( self , a) -> str:
super().__init__()
SCREAMING_SNAKE_CASE = (config.num_hidden_layers + 1) // 5
SCREAMING_SNAKE_CASE = config.hidden_size
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = nn.ModuleList(
[
BasicTransformerBlock(__A , __A , __A , activation_fn='gelu' , attention_bias=__A)
for _ in range(__A)
])
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
for block in self.blocks:
SCREAMING_SNAKE_CASE = block(__A)
return hidden_states
| 73 |
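# The projection head above in miniature: a pooled feature vector is given a
# sequence axis and projected down to proj_size (toy dimensions, not the real config):
import torch
from torch import nn
_hidden_size, _proj_size = 8, 4
_pooled = torch.randn(1, _hidden_size)
_proj = nn.Linear(_hidden_size, _proj_size)
assert _proj(_pooled[:, None]).shape == (1, 1, _proj_size)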
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = d_model
_lowercase = encoder_ffn_dim
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super().outputs
else:
_lowercase = super(__A ,self ).outputs
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
# Generate decoder inputs
_lowercase = seq_length if not self.use_past else 1
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
_lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowercase = dict(**__A ,**__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
_lowercase = common_inputs['decoder_input_ids'].shape[1]
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = decoder_seq_length + 3
_lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
_lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowercase , _lowercase = self.num_layers
_lowercase = min(__A ,__A )
_lowercase = max(__A ,__A ) - min_num_layers
_lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A ,__A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase , _lowercase = self.num_layers
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = common_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase = tokenizer.num_special_tokens_to_add(__A )
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
return common_inputs
def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
elif self.task == "causal-lm":
_lowercase = self._generate_dummy_inputs_for_causal_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
else:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
else:
_lowercase = super(__A ,self )._flatten_past_key_values_(
__A ,__A ,__A ,__A ) | 67 | 0 |
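# A simplified restatement (an assumption, not the library source) of the
# compute_effective_axis_dimension rule used above: a dynamic axis (-1) is
# replaced by a small fixed default so ONNX tracing cannot over-specialize,
# and room is reserved for special tokens.
def _effective_dim(dim, fixed_default, num_token_to_add=0):
    if dim <= 0:
        dim = fixed_default
    return dim - num_token_to_add
assert _effective_dim(-1, 8, 2) == 6
assert _effective_dim(16, 8) == 16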
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_ = TypeVar('''T''')
class _UpperCAmelCase ( Generic[T] ):
def __init__( self , lowercase_ = True ) -> None:
UpperCAmelCase = {} # dictionary of lists
UpperCAmelCase = directed
def a_ ( self , lowercase_ , lowercase_ ) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__A )
self.adj_list[destination_vertex].append(__A )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__A )
UpperCAmelCase = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the destination vertex
# as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(__A )
UpperCAmelCase = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex; also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
else:
UpperCAmelCase = [destination_vertex]
UpperCAmelCase = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__A )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__A )
UpperCAmelCase = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
UpperCAmelCase = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
UpperCAmelCase = [destination_vertex]
UpperCAmelCase = []
return self
def __repr__( self ) -> str:
return pformat(self.adj_list )
| 373 |
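# Usage sketch of the adjacency-list graph above; the class and method names
# are assumptions read off the type annotations (the transformed definitions
# obscure them):
# g = GraphAdjacencyList()   # undirected by default (directed=True flips it)
# g.add_edge(1, 2).add_edge(2, 3)
# print(g)  # {1: [2], 2: [1, 3], 3: [2]}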
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Any ) -> str:
torch.manual_seed(0 )
_lowercase = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase = self.dummy_uncond_unet
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ).images
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ,return_dict=__A )[0]
_lowercase = image[0, -3:, -3:, -1]
_lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
_lowercase = 'google/ncsnpp-celebahq-256'
_lowercase = UNetaDModel.from_pretrained(__A )
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=20 ,generator=__A ,output_type='numpy' ).images
_lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 67 | 0 |
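# The determinism idiom used twice above, in isolation: re-seeding the global
# generator makes consecutive random draws identical.
import torch
torch.manual_seed(0)
_a = torch.randn(3)
torch.manual_seed(0)
_b = torch.randn(3)
assert torch.equal(_a, _b)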
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[Any] = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='''perceiver'''
def __init__( self : Optional[Any] , __a : int=2_56 , __a : List[Any]=12_80 , __a : List[Any]=7_68 , __a : List[str]=1 , __a : Dict=26 , __a : Union[str, Any]=8 , __a : Union[str, Any]=8 , __a : List[Any]=None , __a : str=None , __a : str="kv" , __a : Union[str, Any]=1 , __a : Any=1 , __a : Any="gelu" , __a : Optional[Any]=0.1 , __a : Dict=0.02 , __a : Union[str, Any]=1e-1_2 , __a : List[Any]=True , __a : List[Any]=2_62 , __a : Any=20_48 , __a : str=56 , __a : Tuple=[3_68, 4_96] , __a : Union[str, Any]=16 , __a : List[str]=19_20 , __a : List[Any]=16 , __a : str=[1, 16, 2_24, 2_24] , **__a : Tuple , ):
super().__init__(**__A )
_a = num_latents
_a = d_latents
_a = d_model
_a = num_blocks
_a = num_self_attends_per_block
_a = num_self_attention_heads
_a = num_cross_attention_heads
_a = qk_channels
_a = v_channels
_a = cross_attention_shape_for_attention
_a = self_attention_widening_factor
_a = cross_attention_widening_factor
_a = hidden_act
_a = attention_probs_dropout_prob
_a = initializer_range
_a = layer_norm_eps
_a = use_query_residual
# masked language modeling attributes
_a = vocab_size
_a = max_position_embeddings
# image classification attributes
_a = image_size
# flow attributes
_a = train_size
# multimodal autoencoding attributes
_a = num_frames
_a = audio_samples_per_frame
_a = samples_per_patch
_a = output_shape
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : str ):
if self.task == "multiple-choice":
_a = {0: "batch", 1: "choice", 2: "sequence"}
else:
_a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def UpperCamelCase__ ( self : int ):
return 1e-4
def UpperCamelCase__ ( self : Union[str, Any] , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__A , __A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = preprocessor.num_special_tokens_to_add(__A )
_a = compute_effective_axis_dimension(
__A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_a = [" ".join(["a"] ) * seq_length] * batch_size
_a = dict(preprocessor(__A , return_tensors=__A ) )
_a = inputs.pop("input_ids" )
return inputs
elif isinstance(__A , __A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(__A , fixed_dimension=OnnxConfig.default_fixed_batch )
_a = self._generate_dummy_images(__A , __A , __A , __A )
_a = dict(preprocessor(images=__A , return_tensors=__A ) )
_a = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 692 |
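# The dynamic-axes declaration above, spelled out: axis 0 is batch and axis 1
# is sequence, so the exported ONNX graph accepts any batch/sequence length.
from collections import OrderedDict
_dynamic_axis = {0: 'batch', 1: 'sequence'}
_onnx_inputs = OrderedDict([('inputs', _dynamic_axis), ('attention_mask', _dynamic_axis)])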
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :str ) -> list:
_lowercase = len(snake_case__ )
_lowercase = []
for i in range(len(snake_case__ ) - pat_len + 1 ):
_lowercase = True
for j in range(snake_case__ ):
if s[i + j] != pattern[j]:
_lowercase = False
break
if match_found:
position.append(snake_case__ )
return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 67 | 0 |
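# Complexity note for the scan above: O(len(s) * len(pattern)) comparisons in
# the worst case, e.g. highly repetitive inputs. Overlapping matches are reported too:
assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]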
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_SCREAMING_SNAKE_CASE = dict(zip(__A , range(len(__A ) ) ) )
_SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""}
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__A ) )
_SCREAMING_SNAKE_CASE = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , __A )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__A , __A )
def snake_case_( self , **A ) -> Dict:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__A )
def snake_case_( self , **A ) -> Dict:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__A )
def snake_case_( self , **A ) -> List[Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__A )
def snake_case_( self ) -> str:
shutil.rmtree(self.tmpdirname )
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__A , image_processor=__A )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
_SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__A , image_processor=__A )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __A )
self.assertIsInstance(processor_fast.tokenizer , __A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __A )
self.assertIsInstance(processor_fast.image_processor , __A )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__A , padding_value=1.0 )
_SCREAMING_SNAKE_CASE = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__A , image_processor=__A )
_SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE = image_processor(__A , return_tensors="""np""" )
_SCREAMING_SNAKE_CASE = processor(images=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__A , image_processor=__A )
_SCREAMING_SNAKE_CASE = """lower newer"""
_SCREAMING_SNAKE_CASE = processor(text=__A )
_SCREAMING_SNAKE_CASE = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__A , image_processor=__A )
_SCREAMING_SNAKE_CASE = """lower newer"""
_SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__A , image_processor=__A )
_SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE = processor(images=__A , visual_prompt=__A )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__A , image_processor=__A )
_SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE = processor.batch_decode(__A )
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
| 314 |
from typing import Any
import numpy as np
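# Rayleigh quotient of a Hermitian matrix a and a nonzero vector v:
#     R(a, v) = (v* . a . v) / (v* . v)
# For Hermitian a, R(a, v) is always real and is bounded by the smallest and
# largest eigenvalues of a (min-max theorem).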
def is_hermitian( matrix :np.ndarray ) -> bool:
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient( a :np.ndarray , v :np.ndarray ) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests( ) -> None:
    a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F"""{a} is not hermitian."""
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F"""{a} is not hermitian."""
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    tests() | 67 | 0 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def lowerCamelCase( a__):
random.seed(snake_case__)
np.random.seed(snake_case__)
torch.manual_seed(snake_case__)
torch.cuda.manual_seed_all(snake_case__)
# ^^ safe to call this function even if cuda is not available
class A__ :
def __init__( self : Dict , _a : Iterable[torch.nn.Parameter] , _a : float = 0.99_99 , _a : float = 0.0 , _a : int = 0 , _a : bool = False , _a : Union[float, int] = 1.0 , _a : Union[float, int] = 2 / 3 , _a : Optional[Any] = None , _a : Dict[str, Any] = None , **_a : int , ) -> Tuple:
"""simple docstring"""
if isinstance(__A , torch.nn.Module ):
_SCREAMING_SNAKE_CASE =(
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , __A , standard_warn=__A , )
_SCREAMING_SNAKE_CASE =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_SCREAMING_SNAKE_CASE =True
if kwargs.get('''max_value''' , __A ) is not None:
_SCREAMING_SNAKE_CASE ='''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , __A , standard_warn=__A )
_SCREAMING_SNAKE_CASE =kwargs['''max_value''']
if kwargs.get('''min_value''' , __A ) is not None:
_SCREAMING_SNAKE_CASE ='''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , __A , standard_warn=__A )
_SCREAMING_SNAKE_CASE =kwargs['''min_value''']
_SCREAMING_SNAKE_CASE =list(__A )
_SCREAMING_SNAKE_CASE =[p.clone().detach() for p in parameters]
if kwargs.get('''device''' , __A ) is not None:
_SCREAMING_SNAKE_CASE ='''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , __A , standard_warn=__A )
self.to(device=kwargs['''device'''] )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =decay
_SCREAMING_SNAKE_CASE =min_decay
_SCREAMING_SNAKE_CASE =update_after_step
_SCREAMING_SNAKE_CASE =use_ema_warmup
_SCREAMING_SNAKE_CASE =inv_gamma
_SCREAMING_SNAKE_CASE =power
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None # set in `step()`
_SCREAMING_SNAKE_CASE =model_cls
_SCREAMING_SNAKE_CASE =model_config
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , _a : int , _a : List[Any] ) -> "EMAModel":
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =model_cls.load_config(__A , return_unused_kwargs=__A )
_SCREAMING_SNAKE_CASE =model_cls.from_pretrained(__A )
_SCREAMING_SNAKE_CASE =cls(model.parameters() , model_cls=__A , model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def __UpperCamelCase ( self : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
_SCREAMING_SNAKE_CASE =self.model_cls.from_config(self.model_config )
_SCREAMING_SNAKE_CASE =self.state_dict()
state_dict.pop('''shadow_params''' , __A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def __UpperCamelCase ( self : Optional[Any] , _a : int ) -> float:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_SCREAMING_SNAKE_CASE =1 - (1 + step / self.inv_gamma) ** -self.power
else:
_SCREAMING_SNAKE_CASE =(1 + step) / (10 + step)
_SCREAMING_SNAKE_CASE =min(__A , self.decay )
# make sure decay is not smaller than min_decay
_SCREAMING_SNAKE_CASE =max(__A , self.min_decay )
return cur_decay_value
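        # Worked example of the schedule above (a sketch, assuming the defaults
        # inv_gamma=1.0, power=2/3, update_after_step=0):
        #   with use_ema_warmup: step 1 -> 1 - 2 ** (-2 / 3) ~ 0.370,
        #   step 10 -> ~0.798, step 1000 -> ~0.990, always capped at `decay`
        #   without warmup: (1 + step) / (10 + step), e.g. step 90 -> 0.91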
@torch.no_grad()
def __UpperCamelCase ( self : Optional[int] , _a : Iterable[torch.nn.Parameter] ) -> Optional[Any]:
"""simple docstring"""
if isinstance(__A , torch.nn.Module ):
_SCREAMING_SNAKE_CASE =(
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , __A , standard_warn=__A , )
_SCREAMING_SNAKE_CASE =parameters.parameters()
_SCREAMING_SNAKE_CASE =list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_SCREAMING_SNAKE_CASE =self.get_decay(self.optimization_step )
_SCREAMING_SNAKE_CASE =decay
_SCREAMING_SNAKE_CASE =1 - decay
_SCREAMING_SNAKE_CASE =contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , __A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_SCREAMING_SNAKE_CASE =deepspeed.zero.GatheredParameters(__A , modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def __UpperCamelCase ( self : Any , _a : Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =list(__A )
for s_param, param in zip(self.shadow_params , __A ):
param.data.copy_(s_param.to(param.device ).data )
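        # Minimal usage sketch for the EMA helper above (a hypothetical loop;
        # diffusers names this class EMAModel, cf. the "EMAModel" annotation):
        #   ema = EMAModel(model.parameters(), decay=0.9999)
        #   for batch in dataloader:
        #       optimizer.step(); optimizer.zero_grad()
        #       ema.step(model.parameters())   # refresh the shadow parameters
        #   ema.copy_to(model.parameters())    # load averaged weights for eval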
def __UpperCamelCase ( self : Any , _a : Optional[int]=None , _a : Union[str, Any]=None ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[
p.to(device=__A , dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def __UpperCamelCase ( self : Optional[Any] ) -> dict:
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __UpperCamelCase ( self : Dict , _a : Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[param.detach().cpu().clone() for param in parameters]
def __UpperCamelCase ( self : int , _a : Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , __A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_SCREAMING_SNAKE_CASE =None
def __UpperCamelCase ( self : List[str] , _a : dict ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =copy.deepcopy(__A )
_SCREAMING_SNAKE_CASE =state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
_SCREAMING_SNAKE_CASE =state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , __A ):
raise ValueError('''Invalid min_decay''' )
_SCREAMING_SNAKE_CASE =state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , __A ):
raise ValueError('''Invalid optimization_step''' )
_SCREAMING_SNAKE_CASE =state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , __A ):
raise ValueError('''Invalid update_after_step''' )
_SCREAMING_SNAKE_CASE =state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , __A ):
raise ValueError('''Invalid use_ema_warmup''' )
_SCREAMING_SNAKE_CASE =state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
_SCREAMING_SNAKE_CASE =state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
_SCREAMING_SNAKE_CASE =state_dict.get('''shadow_params''' , __A )
if shadow_params is not None:
_SCREAMING_SNAKE_CASE =shadow_params
if not isinstance(self.shadow_params , __A ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(__A , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' ) | 691 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple ,__A : Dict ,__A : List[Any]=7 ,__A : Dict=3 ,__A : Tuple=30 ,__A : Dict=400 ,__A : Any=True ,__A : List[Any]=None ,__A : Any=True ,__A : List[str]=[0.5, 0.5, 0.5] ,__A : Union[str, Any]=[0.5, 0.5, 0.5] ,__A : int=True ,__A : List[str]=1 / 255 ,__A : Union[str, Any]=True ,) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowercase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_lowercase = parent
_lowercase = batch_size
_lowercase = num_channels
_lowercase = min_resolution
_lowercase = max_resolution
_lowercase = do_resize
_lowercase = size
_lowercase = do_normalize
_lowercase = image_mean
_lowercase = image_std
_lowercase = do_rescale
_lowercase = rescale_factor
_lowercase = do_pad
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ,__A : List[str]=False ) -> Union[str, Any]:
if not batched:
_lowercase = image_inputs[0]
if isinstance(__A ,Image.Image ):
_lowercase , _lowercase = image.size
else:
_lowercase , _lowercase = image.shape[1], image.shape[2]
if w < h:
_lowercase = int(self.size['shortest_edge'] * h / w )
_lowercase = self.size['shortest_edge']
elif w > h:
_lowercase = self.size['shortest_edge']
_lowercase = int(self.size['shortest_edge'] * w / h )
else:
_lowercase = self.size['shortest_edge']
_lowercase = self.size['shortest_edge']
else:
_lowercase = []
for image in image_inputs:
_lowercase , _lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowercase = max(__A ,key=lambda __A : item[0] )[0]
_lowercase = max(__A ,key=lambda __A : item[1] )[1]
return expected_height, expected_width
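        # Example (a sketch): for an array of shape (3, 30, 400) with
        # shortest_edge=18, w=400 > h=30, so the expected output size is
        # height = 18 and width = int(18 * 400 / 30) = 240.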
@require_torch
@require_vision
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A ,'image_mean' ) )
self.assertTrue(hasattr(__A ,'image_std' ) )
self.assertTrue(hasattr(__A ,'do_normalize' ) )
self.assertTrue(hasattr(__A ,'do_resize' ) )
self.assertTrue(hasattr(__A ,'do_rescale' ) )
self.assertTrue(hasattr(__A ,'do_pad' ) )
self.assertTrue(hasattr(__A ,'size' ) )
def __UpperCAmelCase ( self : str ) -> List[str]:
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,__A )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
pass
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowercase = DetaImageProcessor(format='coco_panoptic' )
_lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify masks
_lowercase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) ) | 67 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = '''blenderbot-small'''
__SCREAMING_SNAKE_CASE = ['''past_key_values''']
__SCREAMING_SNAKE_CASE = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__(self , __a=50265 , __a=512 , __a=8 , __a=2048 , __a=16 , __a=8 , __a=2048 , __a=16 , __a=0.0 , __a=0.0 , __a=True , __a=True , __a="gelu" , __a=512 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1 , __a=False , __a=0 , __a=1 , __a=2 , __a=2 , **__a , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = encoder_ffn_dim
UpperCAmelCase__ = encoder_layers
UpperCAmelCase__ = encoder_attention_heads
UpperCAmelCase__ = decoder_ffn_dim
UpperCAmelCase__ = decoder_layers
UpperCAmelCase__ = decoder_attention_heads
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = init_std
UpperCAmelCase__ = encoder_layerdrop
UpperCAmelCase__ = decoder_layerdrop
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = encoder_layers
UpperCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , **__A , )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
@property
def UpperCamelCase__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase__ = {0: 'batch'}
UpperCAmelCase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
UpperCAmelCase__ = {0: 'batch', 1: 'decoder_sequence'}
UpperCAmelCase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase__ , UpperCAmelCase__ = self.num_layers
for i in range(__A ):
UpperCAmelCase__ = {0: 'batch', 2: 'past_sequence + sequence'}
UpperCAmelCase__ = {0: 'batch', 2: 'past_sequence + sequence'}
else:
UpperCAmelCase__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ = super().outputs
else:
UpperCAmelCase__ = super(__A , self ).outputs
if self.use_past:
UpperCAmelCase__ , UpperCAmelCase__ = self.num_layers
for i in range(__A ):
UpperCAmelCase__ = {0: 'batch', 2: 'past_sequence + sequence'}
UpperCAmelCase__ = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase__ (self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ) -> Mapping[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A , __A , __A , __A , __A )
# Generate decoder inputs
UpperCAmelCase__ = seq_length if not self.use_past else 1
UpperCAmelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A , __A , __A , __A , __A )
UpperCAmelCase__ = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase__ = dict(**__A , **__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ = common_inputs['input_ids'].shape
UpperCAmelCase__ = common_inputs['decoder_input_ids'].shape[1]
UpperCAmelCase__ , UpperCAmelCase__ = self.num_attention_heads
UpperCAmelCase__ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ = decoder_seq_length + 3
UpperCAmelCase__ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase__ = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A , __A )] , dim=1 )
UpperCAmelCase__ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase__ , UpperCAmelCase__ = self.num_layers
UpperCAmelCase__ = min(__A , __A )
UpperCAmelCase__ = max(__A , __A ) - min_num_layers
UpperCAmelCase__ = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
UpperCAmelCase__ = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A , __A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
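        # Shape sketch (assuming the config defaults d_model=512, 16 heads):
        # each past_key_values entry holds key/value tensors of shape
        # (batch, 16, past_len, 512 // 16 = 32); the decoder attention mask is
        # widened by `decoder_past_length` ones so cached positions stay visible.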
def UpperCamelCase__ (self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ) -> Mapping[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A , __A , __A , __A , __A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
UpperCAmelCase__ = seqlen + 2
UpperCAmelCase__ , UpperCAmelCase__ = self.num_layers
UpperCAmelCase__ , UpperCAmelCase__ = self.num_attention_heads
UpperCAmelCase__ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ = common_inputs['attention_mask'].dtype
UpperCAmelCase__ = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 )
UpperCAmelCase__ = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def UpperCamelCase__ (self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ) -> Mapping[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = compute_effective_axis_dimension(
__A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase__ = tokenizer.num_special_tokens_to_add(__A )
UpperCAmelCase__ = compute_effective_axis_dimension(
__A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase__ = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase__ = dict(tokenizer(__A , return_tensors=__A ) )
return common_inputs
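        # When batch_size / seq_length come in as -1 (dynamic axes), fixed
        # fallbacks are substituted (OnnxConfig.default_fixed_batch and
        # default_fixed_sequence, minus the special tokens the tokenizer adds)
        # so the exported ONNX graph is traced on a small, fixed-size dummy batch.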
def UpperCamelCase__ (self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
elif self.task == "causal-lm":
UpperCAmelCase__ = self._generate_dummy_inputs_for_causal_lm(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
else:
UpperCAmelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
return common_inputs
def UpperCamelCase__ (self , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ = super()._flatten_past_key_values_(__A , __A , __A , __A )
else:
UpperCAmelCase__ = super(__A , self )._flatten_past_key_values_(
__A , __A , __A , __A )
| 146 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
snake_case = False
snake_case = False
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Namespace ) -> Tuple:
return TrainCommand(snake_case__ )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def __UpperCAmelCase ( __A : ArgumentParser ) -> List[Any]:
_lowercase = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' ,type=__A ,required=__A ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
train_parser.add_argument(
'--column_label' ,type=__A ,default=0 ,help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' ,type=__A ,default=1 ,help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' ,type=__A ,default=2 ,help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' ,type=__A ,default='' ,help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' ,type=__A ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
train_parser.add_argument('--output' ,type=__A ,default='./' ,help='path to saved the trained model.' )
train_parser.add_argument(
'--task' ,type=__A ,default='text_classification' ,help='Task to train the model on.' )
train_parser.add_argument(
'--model' ,type=__A ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' ,type=__A ,default=32 ,help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' ,type=__A ,default=64 ,help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' ,type=__A ,default=3e-5 ,help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' ,type=__A ,default=1e-08 ,help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self : Optional[Any] ,__A : Namespace ) -> Tuple:
_lowercase = logging.get_logger('transformers-cli/training' )
_lowercase = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output ,exist_ok=__A )
_lowercase = args.output
_lowercase = args.column_label
_lowercase = args.column_text
_lowercase = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
_lowercase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
_lowercase = Processor.create_from_csv(
args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
_lowercase = Processor.create_from_csv(
args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = args.validation_split
_lowercase = args.train_batch_size
_lowercase = args.valid_batch_size
_lowercase = args.learning_rate
_lowercase = args.adam_epsilon
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
raise NotImplementedError
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 67 | 0 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__A = HfArgumentParser(InitializationArguments)
__A = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__A = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__A = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
__A = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__A = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 586 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> str:
_lowercase = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_lowercase = 1024
_lowercase = 4096
_lowercase = 24
_lowercase = 16
_lowercase = [5, 11, 17, 23]
_lowercase = [256, 512, 1024, 1024]
_lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = [256, 512, 768, 768]
_lowercase = 150
_lowercase = 16
_lowercase = (1, 384, 384)
_lowercase = False
_lowercase = 'project'
if "ade" in checkpoint_url:
_lowercase = True
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = 150
_lowercase = 16
_lowercase = 'huggingface/label-files'
_lowercase = 'ade20k-id2label.json'
_lowercase = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
_lowercase = {int(snake_case__ ): v for k, v in idalabel.items()}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
_lowercase = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
_lowercase = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowercase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowercase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowercase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_lowercase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowercase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowercase = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowercase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_lowercase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_lowercase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowercase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowercase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowercase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowercase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowercase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowercase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowercase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :int ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[: config.hidden_size, :]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
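# Note on the function above: timm stores the attention projection as one
# fused qkv matrix of shape (3 * hidden_size, hidden_size); the slicing splits
# it into the separate query / key / value weights and biases that the HF
# checkpoint layout expects.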
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
_lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str] ) -> str:
_lowercase , _lowercase = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowercase = torch.load(snake_case__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
_lowercase = state_dict.pop(snake_case__ )
_lowercase = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
_lowercase = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
_lowercase = 480 if 'ade' in checkpoint_url else 384
_lowercase = DPTImageProcessor(size=snake_case__ )
_lowercase = prepare_img()
_lowercase = image_processor(snake_case__ , return_tensors='pt' )
# forward pass
_lowercase = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
_lowercase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 67 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self : Dict , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]=13 , lowerCAmelCase : Optional[Any]=30 , lowerCAmelCase : Any=2 , lowerCAmelCase : Tuple=3 , lowerCAmelCase : Any=True , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=32 , lowerCAmelCase : Any=2 , lowerCAmelCase : str=4 , lowerCAmelCase : List[str]=37 , lowerCAmelCase : Union[str, Any]="gelu" , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Tuple=10 , lowerCAmelCase : Optional[int]=0.02 , lowerCAmelCase : List[str]=3 , lowerCAmelCase : Dict=None , ):
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
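        # e.g. image_size=30, patch_size=2 -> (30 // 2) ** 2 = 225 patches,
        # so seq_length = 226 once the [CLS] token is counted.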
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def __A ( self : Optional[int] ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , )
def __A ( self : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict ):
'''simple docstring'''
UpperCAmelCase_ = TFViTModel(config=__A )
UpperCAmelCase_ = model(__A , training=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(__A , interpolate_pos_encoding=__A , training=__A )
UpperCAmelCase_ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __A ( self : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = TFViTForImageClassification(__A )
UpperCAmelCase_ = model(__A , labels=__A , training=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(__A , interpolate_pos_encoding=__A , training=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = TFViTForImageClassification(__A )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowercase , lowercase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = TFViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def __A ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , tf.keras.layers.Layer ) )
def __A ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__A )
UpperCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def __A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __A ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(__A )
def __lowerCAmelCase ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __A ( self : Optional[Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __A ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=__A , return_tensors="tf" )
# forward pass
UpperCAmelCase_ = model(**__A )
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , __A )
UpperCAmelCase_ = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __A , atol=1e-4 ) | 162 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
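# Lazy-import pattern: the submodules below are only imported when one of the
# listed names is first accessed, so importing the package stays cheap even
# when the optional torch / tf backends are installed.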
snake_case = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 0 |
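The module above registers itself with transformers' `_LazyModule` so that torch- and TF-specific submodules are only imported when first accessed. A minimal standalone sketch of the same deferred-import idea, built on PEP 562's module-level `__getattr__` (package and symbol names here are illustrative, not transformers APIs):

```python
# my_package/__init__.py -- hypothetical package layout
import importlib
from typing import List

_import_structure = {
    "configuration_foo": ["FooConfig"],
    "modeling_foo": ["FooModel"],  # assume this submodule pulls in a heavy backend
}

def __getattr__(name: str):
    # Import the owning submodule only when one of its symbols is requested.
    for submodule, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

def __dir__() -> List[str]:
    return sorted(set(sum(_import_structure.values(), [])))
```

With this layout, `from my_package import FooConfig` triggers only the `configuration_foo` import, which is the behaviour the `_LazyModule` wiring above achieves for the ViT MAE package.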
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any]=7 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :Tuple=30 , lowerCAmelCase__ :Dict=400 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Union[str, Any]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :int=True , lowerCAmelCase__ :List[str]=1 / 255 , lowerCAmelCase__ :Union[str, Any]=True , ) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
__SCREAMING_SNAKE_CASE : Union[str, Any] = parent
__SCREAMING_SNAKE_CASE : Optional[int] = batch_size
__SCREAMING_SNAKE_CASE : Dict = num_channels
__SCREAMING_SNAKE_CASE : str = min_resolution
__SCREAMING_SNAKE_CASE : Tuple = max_resolution
__SCREAMING_SNAKE_CASE : Tuple = do_resize
__SCREAMING_SNAKE_CASE : List[str] = size
__SCREAMING_SNAKE_CASE : int = do_normalize
__SCREAMING_SNAKE_CASE : Optional[Any] = image_mean
__SCREAMING_SNAKE_CASE : Tuple = image_std
__SCREAMING_SNAKE_CASE : Optional[int] = do_rescale
__SCREAMING_SNAKE_CASE : Any = rescale_factor
__SCREAMING_SNAKE_CASE : Optional[int] = do_pad
def __magic_name__( self :str ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __magic_name__( self :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str]=False ) -> Union[str, Any]:
if not batched:
__SCREAMING_SNAKE_CASE : Dict = image_inputs[0]
if isinstance(__A , Image.Image ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = image.size
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = image.shape[1], image.shape[2]
if w < h:
__SCREAMING_SNAKE_CASE : List[str] = int(self.size['''shortest_edge'''] * h / w )
__SCREAMING_SNAKE_CASE : int = self.size['''shortest_edge''']
elif w > h:
__SCREAMING_SNAKE_CASE : int = self.size['''shortest_edge''']
__SCREAMING_SNAKE_CASE : str = int(self.size['''shortest_edge'''] * w / h )
else:
__SCREAMING_SNAKE_CASE : List[str] = self.size['''shortest_edge''']
__SCREAMING_SNAKE_CASE : Any = self.size['''shortest_edge''']
else:
__SCREAMING_SNAKE_CASE : Tuple = []
for image in image_inputs:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__SCREAMING_SNAKE_CASE : Any = max(__A , key=lambda lowerCAmelCase__ : item[0] )[0]
__SCREAMING_SNAKE_CASE : str = max(__A , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = DetaImageProcessor if is_vision_available() else None
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = DetaImageProcessingTester(self )
@property
def __magic_name__( self :List[Any] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , '''image_mean''' ) )
self.assertTrue(hasattr(__A , '''image_std''' ) )
self.assertTrue(hasattr(__A , '''do_normalize''' ) )
self.assertTrue(hasattr(__A , '''do_resize''' ) )
self.assertTrue(hasattr(__A , '''do_rescale''' ) )
self.assertTrue(hasattr(__A , '''do_pad''' ) )
self.assertTrue(hasattr(__A , '''size''' ) )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , __A )
def __magic_name__( self :List[Any] ) -> Any:
pass
def __magic_name__( self :Optional[int] ) -> Tuple:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__( self :Union[str, Any] ) -> List[str]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(__A , return_tensors='''pt''' ).pixel_values
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__( self :Optional[Any] ) -> Any:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(__A , return_tensors='''pt''' ).pixel_values
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __magic_name__( self :List[str] ) -> List[str]:
# prepare image and target
__SCREAMING_SNAKE_CASE : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__SCREAMING_SNAKE_CASE : Any = json.loads(f.read() )
__SCREAMING_SNAKE_CASE : int = {'''image_id''': 39_769, '''annotations''': target}
# encode them
__SCREAMING_SNAKE_CASE : Tuple = DetaImageProcessor()
__SCREAMING_SNAKE_CASE : Tuple = image_processing(images=__A , annotations=__A , return_tensors='''pt''' )
# verify pixel values
__SCREAMING_SNAKE_CASE : Dict = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , __A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1E-4 ) )
# verify area
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) )
# verify boxes
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1E-3 ) )
# verify image_id
__SCREAMING_SNAKE_CASE : str = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) )
# verify is_crowd
__SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) )
# verify class_labels
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) )
# verify orig_size
__SCREAMING_SNAKE_CASE : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) )
# verify size
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) )
@slow
def __magic_name__( self :List[Any] ) -> Dict:
# prepare image, target and masks_path
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(f.read() )
__SCREAMING_SNAKE_CASE : Any = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
__SCREAMING_SNAKE_CASE : Optional[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__SCREAMING_SNAKE_CASE : List[str] = DetaImageProcessor(format='''coco_panoptic''' )
__SCREAMING_SNAKE_CASE : Any = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors='''pt''' )
# verify pixel values
__SCREAMING_SNAKE_CASE : Dict = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , __A )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1E-4 ) )
# verify area
__SCREAMING_SNAKE_CASE : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) )
# verify boxes
__SCREAMING_SNAKE_CASE : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A )
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1E-3 ) )
# verify image_id
__SCREAMING_SNAKE_CASE : int = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) )
# verify is_crowd
__SCREAMING_SNAKE_CASE : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) )
# verify class_labels
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) )
# verify masks
__SCREAMING_SNAKE_CASE : Tuple = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __A )
# verify orig_size
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) )
# verify size
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) )
| 696 |
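The `get_expected_values` helper above re-derives the processor's shortest-edge resize rule to predict output shapes. That rule in isolation (the real processor additionally caps the longer side at `longest_edge`, omitted here for brevity):

```python
def shortest_edge_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    """Scale so the shorter side equals `shortest_edge`, preserving aspect ratio."""
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(30, 400) == (18, 240)   # wide image: height is pinned
assert shortest_edge_resize(400, 30) == (240, 18)   # tall image: width is pinned
```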
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 0 |
import re
def is_sri_lankan_phone_number( phone ):
    """Validate a Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"""^(?:0|94|\+94|0{2}94)""" r"""7(0|1|2|4|5|6|7|8)""" r"""(-| |)""" r"""\d{7}$""" )
    return bool(re.search(pattern ,phone ) )
if __name__ == "__main__":
    phone = '0094702343221'
    print(is_sri_lankan_phone_number(phone))
| 398 |
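A few spot checks make the accepted prefixes (`0`, `94`, `+94`, `0094`) and the allowed operator digits concrete; the pattern is copied from the function above and the sample numbers are synthetic:

```python
import re

SRI_LANKAN_MOBILE = re.compile(
    r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
)

assert SRI_LANKAN_MOBILE.search("0094702343221")      # 0094 prefix
assert SRI_LANKAN_MOBILE.search("+94773283048")       # +94 prefix
assert SRI_LANKAN_MOBILE.search("0718382399")         # plain leading 0
assert not SRI_LANKAN_MOBILE.search("0093702343221")  # wrong country code
assert not SRI_LANKAN_MOBILE.search("0793283048")     # 9 is not a valid operator digit
```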
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = '''transfo-xl'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''mems''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[Any] ,__A : Union[str, Any]=26_7735 ,__A : List[Any]=[2_0000, 4_0000, 20_0000] ,__A : Dict=1024 ,__A : str=1024 ,__A : Dict=16 ,__A : int=64 ,__A : Dict=4096 ,__A : List[Any]=4 ,__A : Optional[int]=False ,__A : Union[str, Any]=18 ,__A : Tuple=1600 ,__A : str=1000 ,__A : Dict=True ,__A : Dict=True ,__A : int=0 ,__A : Optional[int]=-1 ,__A : int=True ,__A : List[str]=0.1 ,__A : Optional[int]=0.0 ,__A : str=True ,__A : Tuple="normal" ,__A : Union[str, Any]=0.01 ,__A : Tuple=0.01 ,__A : Any=0.02 ,__A : Union[str, Any]=1e-5 ,__A : List[Any]=0 ,**__A : str ,) -> List[Any]:
_lowercase = vocab_size
_lowercase = []
self.cutoffs.extend(__A )
if proj_share_all_but_first:
_lowercase = [False] + [True] * len(self.cutoffs )
else:
_lowercase = [False] + [False] * len(self.cutoffs )
_lowercase = d_model
_lowercase = d_embed
_lowercase = d_head
_lowercase = d_inner
_lowercase = div_val
_lowercase = pre_lnorm
_lowercase = n_layer
_lowercase = n_head
_lowercase = mem_len
_lowercase = same_length
_lowercase = attn_type
_lowercase = clamp_len
_lowercase = sample_softmax
_lowercase = adaptive
_lowercase = dropout
_lowercase = dropatt
_lowercase = untie_r
_lowercase = init
_lowercase = init_range
_lowercase = proj_init_std
_lowercase = init_std
_lowercase = layer_norm_epsilon
super().__init__(eos_token_id=__A ,**__A )
@property
def __UpperCAmelCase ( self : str ) -> Optional[int]:
# Message copied from Transformer-XL documentation
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def __UpperCAmelCase ( self : Any ,__A : Dict ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 67 | 0 |
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 278 |
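The same availability check is most often used to choose a default device for later tensors; a one-line companion sketch:

```python
import torch

# Fall back to CPU when no CUDA device is visible.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
```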
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''dpr'''
def __init__( self : int ,__A : Union[str, Any]=3_0522 ,__A : Optional[int]=768 ,__A : int=12 ,__A : List[Any]=12 ,__A : Optional[Any]=3072 ,__A : Union[str, Any]="gelu" ,__A : Union[str, Any]=0.1 ,__A : List[Any]=0.1 ,__A : str=512 ,__A : List[str]=2 ,__A : Tuple=0.02 ,__A : Tuple=1e-12 ,__A : List[Any]=0 ,__A : List[str]="absolute" ,__A : int = 0 ,**__A : int ,) -> Tuple:
super().__init__(pad_token_id=__A ,**__A )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = initializer_range
_lowercase = layer_norm_eps
_lowercase = projection_dim
_lowercase = position_embedding_type | 67 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
def __init__( self , a , a=None , a=None , a=None , a="resnet50" , a=3 , a=32 , a=3 , a=True , a=True , ) -> Any:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = out_indices if out_indices is not None else [4]
SCREAMING_SNAKE_CASE = stage_names
SCREAMING_SNAKE_CASE = out_features
SCREAMING_SNAKE_CASE = backbone
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = use_pretrained_backbone
SCREAMING_SNAKE_CASE = is_training
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = TimmBackbone(config=__A)
model.to(__A)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(__A)
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
_lowercase : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
_lowercase : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
_lowercase : List[Any] = False
_lowercase : Tuple = False
_lowercase : List[str] = False
_lowercase : Any = False
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = TimmBackboneModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__A , has_text_modality=__A)
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = 'resnet18'
SCREAMING_SNAKE_CASE = 'microsoft/resnet-18'
SCREAMING_SNAKE_CASE = AutoBackbone.from_pretrained(__A , use_timm_backbone=__A)
SCREAMING_SNAKE_CASE = AutoBackbone.from_pretrained(__A)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
SCREAMING_SNAKE_CASE = AutoBackbone.from_pretrained(__A , use_timm_backbone=__A , out_indices=[1, 2, 3])
SCREAMING_SNAKE_CASE = AutoBackbone.from_pretrained(__A , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.')
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__A)
SCREAMING_SNAKE_CASE = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , __A)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.has_attentions
# no need to test all models as different heads yield the same functionality
SCREAMING_SNAKE_CASE = self.all_model_classes[0]
SCREAMING_SNAKE_CASE = model_class(__A)
model.to(__A)
SCREAMING_SNAKE_CASE = self._prepare_for_class(__A , __A)
SCREAMING_SNAKE_CASE = model(**__A)
SCREAMING_SNAKE_CASE = outputs[0][-1]
# Encoder-/Decoder-only models
SCREAMING_SNAKE_CASE = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
SCREAMING_SNAKE_CASE = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__A)
model.to(__A)
model.eval()
SCREAMING_SNAKE_CASE = model(**__A)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
SCREAMING_SNAKE_CASE = copy.deepcopy(__A)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = model_class(__A)
model.to(__A)
model.eval()
SCREAMING_SNAKE_CASE = model(**__A)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
SCREAMING_SNAKE_CASE = copy.deepcopy(__A)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = model_class(__A)
model.to(__A)
model.eval()
SCREAMING_SNAKE_CASE = model(**__A)
| 73 |
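The equivalence tested above, timm reporting `out_indices == (-1,)` versus transformers reporting `[len(stage_names) - 1]`, is just negative-index normalization. A sketch with illustrative stage names:

```python
from typing import List, Sequence

def normalize_out_indices(out_indices: Sequence[int], stage_names: Sequence[str]) -> List[int]:
    """Resolve negative stage indices the way Python sequence indexing does."""
    n = len(stage_names)
    return [i if i >= 0 else n + i for i in out_indices]

stages = ["stem", "stage1", "stage2", "stage3", "stage4"]
assert normalize_out_indices((-1,), stages) == [len(stages) - 1] == [4]
```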
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr ):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    _lowercase = list(range(10 , 0 , -1 ) )
    print('Initial List' )
    print(*_lowercase )
    _lowercase = odd_even_transposition(_lowercase )
    print('Sorted List\n' )
    print(*_lowercase )
if __name__ == "__main__":
main() | 67 | 0 |
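For contrast with the pipe-and-process version above, the same swap schedule runs sequentially in a few lines: even phases compare pairs starting at index 0, odd phases at index 1, and n phases suffice for n elements:

```python
def odd_even_transposition_sequential(arr: list) -> list:
    """In-place odd-even transposition sort; returns the sorted list."""
    n = len(arr)
    for phase in range(n):
        # even phases compare (0,1), (2,3), ...; odd phases compare (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))
```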
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
SCREAMING_SNAKE_CASE_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def lowercase__ ( lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = {}
with open(snake_case__ , 'r' ) as file:
for line_number, line in enumerate(snake_case__ ):
UpperCAmelCase = line.strip()
if line:
UpperCAmelCase = line.split()
UpperCAmelCase = line_number
UpperCAmelCase = words[0]
UpperCAmelCase = value
return result
def lowercase__ ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
for attribute in key.split('.' ):
UpperCAmelCase = getattr(snake_case__ , snake_case__ )
UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case__ ):
UpperCAmelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
UpperCAmelCase = 'param'
if weight_type is not None and weight_type != "param":
UpperCAmelCase = getattr(snake_case__ , snake_case__ ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase = hf_pointer
for attribute in hf_param_name.split('.' ):
UpperCAmelCase = getattr(snake_case__ , snake_case__ )
UpperCAmelCase = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase = value[0]
else:
UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
UpperCAmelCase = value
elif weight_type == "weight_g":
UpperCAmelCase = value
elif weight_type == "weight_v":
UpperCAmelCase = value
elif weight_type == "bias":
UpperCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
UpperCAmelCase = getattr(snake_case__ , snake_case__ )
UpperCAmelCase = value
else:
UpperCAmelCase = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def lowercase__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case__ ):
UpperCAmelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
UpperCAmelCase = 'param'
if weight_type is not None and weight_type != "param":
UpperCAmelCase = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase = '.'.join([key, hf_param_name] )
else:
UpperCAmelCase = key
UpperCAmelCase = value if 'lm_head' in full_key else value[0]
SCREAMING_SNAKE_CASE_ = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def lowercase__ ( lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Tuple=None ) -> Any:
"""simple docstring"""
UpperCAmelCase = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCAmelCase = True
if "*" in mapped_key:
UpperCAmelCase = name.split(snake_case__ )[0].split('.' )[-2]
UpperCAmelCase = mapped_key.replace('*' , snake_case__ )
if "weight_g" in name:
UpperCAmelCase = 'weight_g'
elif "weight_v" in name:
UpperCAmelCase = 'weight_v'
elif "bias" in name:
UpperCAmelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase = 'weight'
else:
UpperCAmelCase = None
if hf_dict is not None:
rename_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
return is_used
return is_used
def lowercase__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = fairseq_model.state_dict()
UpperCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == 'group' , )
UpperCAmelCase = True
else:
UpperCAmelCase = load_wavaveca_layer(snake_case__ , snake_case__ , snake_case__ )
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F"Unused weights: {unused_weights}" )
def lowercase__ ( lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = full_name.split('conv_layers.' )[-1]
UpperCAmelCase = name.split('.' )
UpperCAmelCase = int(items[0] )
UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
UpperCAmelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
UpperCAmelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
UpperCAmelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
UpperCAmelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(snake_case__ )
@torch.no_grad()
def lowercase__ ( lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : int=None , lowerCAmelCase : Dict=True , lowerCAmelCase : str=False ) -> int:
"""simple docstring"""
if config_path is not None:
UpperCAmelCase = WavaVecaConfig.from_pretrained(snake_case__ )
else:
UpperCAmelCase = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase = read_txt_into_dict(snake_case__ )
UpperCAmelCase = idalabel
UpperCAmelCase = WavaVecaForSequenceClassification(snake_case__ )
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
feature_extractor.save_pretrained(snake_case__ )
elif is_finetuned:
if dict_path:
UpperCAmelCase = Dictionary.load(snake_case__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase = target_dict.pad_index
UpperCAmelCase = target_dict.bos_index
UpperCAmelCase = target_dict.eos_index
UpperCAmelCase = len(target_dict.symbols )
UpperCAmelCase = os.path.join(snake_case__ , 'vocab.json' )
if not os.path.isdir(snake_case__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(snake_case__ ) )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
UpperCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase = 0
UpperCAmelCase = 1
with open(snake_case__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(snake_case__ , snake_case__ )
UpperCAmelCase = WavaVecaCTCTokenizer(
snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=snake_case__ , )
UpperCAmelCase = True if config.feat_extract_norm == 'layer' else False
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
UpperCAmelCase = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
UpperCAmelCase = WavaVecaForCTC(snake_case__ )
else:
UpperCAmelCase = WavaVecaForPreTraining(snake_case__ )
if is_finetuned or is_seq_class:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCAmelCase = argparse.Namespace(task='audio_pretraining' )
UpperCAmelCase = fairseq.tasks.setup_task(snake_case__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case__ )
UpperCAmelCase = model[0].eval()
recursively_load_weights(snake_case__ , snake_case__ , not is_finetuned )
hf_wavavec.save_pretrained(snake_case__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 373 |
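The wildcard handling above substitutes a layer index for the `*` in a mapped key by slicing the fairseq parameter name around the matched key. The trick in isolation, mirroring that slicing (the example names are illustrative):

```python
def expand_wildcard_key(fairseq_name: str, key: str, mapped_key: str) -> str:
    """Replace '*' in mapped_key with the layer index that precedes `key` in the source name."""
    layer_index = fairseq_name.split(key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)

assert (
    expand_wildcard_key(
        "encoder.layers.3.self_attn.k_proj.weight",
        "self_attn.k_proj",
        "encoder.layers.*.attention.k_proj",
    )
    == "encoder.layers.3.attention.k_proj"
)
```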
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''
def __init__( self : str ,__A : Union[str, Any]=5_0358 ,__A : Any=768 ,__A : List[str]=12 ,__A : Union[str, Any]=12 ,__A : int=3072 ,__A : Tuple="gelu_new" ,__A : Any=0.1 ,__A : Optional[Any]=0.1 ,__A : Tuple=4096 ,__A : int=2 ,__A : Union[str, Any]=0.02 ,__A : Optional[int]=1e-12 ,__A : List[str]=True ,__A : List[Any]=0 ,__A : Optional[Any]=1 ,__A : Optional[int]=2 ,__A : Optional[int]=66 ,__A : Tuple="block_sparse" ,__A : Optional[int]=True ,__A : Optional[int]=False ,__A : Tuple=64 ,__A : str=3 ,__A : Optional[int]=None ,**__A : Dict ,) -> Union[str, Any]:
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,sep_token_id=__A ,**__A ,)
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = use_cache
_lowercase = rescale_embeddings
_lowercase = attention_type
_lowercase = use_bias
_lowercase = block_size
_lowercase = num_random_blocks
_lowercase = classifier_dropout
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 67 | 0 |
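The `OrderedDict` returned by the `inputs` property above is exactly the shape of a `dynamic_axes` spec for ONNX export, keeping batch and sequence dimensions symbolic. A hedged sketch with a toy stand-in model (the module and file name are placeholders, not BigBird):

```python
import torch
from torch import nn

class ToyTextModel(nn.Module):
    def forward(self, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        # any shape-dependent computation works for the export demo
        return (input_ids * attention_mask).sum(dim=-1)

dynamic_axes = {
    "input_ids": {0: "batch", 1: "sequence"},
    "attention_mask": {0: "batch", 1: "sequence"},
}
dummy = (torch.ones(1, 8, dtype=torch.long), torch.ones(1, 8, dtype=torch.long))
torch.onnx.export(
    ToyTextModel(),
    dummy,
    "toy_text_model.onnx",
    input_names=["input_ids", "attention_mask"],
    output_names=["pooled"],
    dynamic_axes=dynamic_axes,
)
```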
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='''instructblip_vision_model'''
def __init__( self : Tuple , __a : Union[str, Any]=14_08 , __a : Optional[Any]=61_44 , __a : Tuple=39 , __a : Any=16 , __a : Any=2_24 , __a : List[Any]=14 , __a : Dict="gelu" , __a : str=1e-6 , __a : Optional[int]=0.0 , __a : int=1e-1_0 , __a : Tuple=True , **__a : Any , ):
super().__init__(**__A )
_a = hidden_size
_a = intermediate_size
_a = num_hidden_layers
_a = num_attention_heads
_a = patch_size
_a = image_size
_a = initializer_range
_a = attention_dropout
_a = layer_norm_eps
_a = hidden_act
_a = qkv_bias
@classmethod
def UpperCamelCase__ ( cls : List[str] , __a : Union[str, os.PathLike] , **__a : Tuple ):
cls._set_token_in_kwargs(__A )
_a , _a = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_a = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A , **__A )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='''instructblip_qformer'''
def __init__( self : Optional[int] , __a : str=3_05_22 , __a : str=7_68 , __a : Tuple=12 , __a : Tuple=12 , __a : Any=30_72 , __a : List[str]="gelu" , __a : List[str]=0.1 , __a : int=0.1 , __a : List[Any]=5_12 , __a : Optional[int]=0.02 , __a : Optional[Any]=1e-1_2 , __a : Optional[int]=0 , __a : Optional[int]="absolute" , __a : Optional[Any]=2 , __a : str=14_08 , **__a : str , ):
super().__init__(pad_token_id=__A , **__A )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = cross_attention_frequency
_a = encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls : Dict , __a : Union[str, os.PathLike] , **__a : int ):
cls._set_token_in_kwargs(__A )
_a , _a = cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_a = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A , **__A )
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='''instructblip'''
__a =True
def __init__( self : Any , __a : int=None , __a : List[Any]=None , __a : str=None , __a : int=32 , **__a : Optional[Any] ):
super().__init__(**__A )
if vision_config is None:
_a = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
_a = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
_a = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
_a = InstructBlipVisionConfig(**__A )
_a = InstructBlipQFormerConfig(**__A )
_a = text_config["model_type"] if "model_type" in text_config else "opt"
_a = CONFIG_MAPPING[text_model_type](**__A )
_a = self.text_config.tie_word_embeddings
_a = self.text_config.is_encoder_decoder
_a = num_query_tokens
_a = self.vision_config.hidden_size
_a = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_a = 1.0
_a = 0.02
@classmethod
def UpperCamelCase__ ( cls : Union[str, Any] , __a : InstructBlipVisionConfig , __a : InstructBlipQFormerConfig , __a : PretrainedConfig , **__a : Tuple , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = copy.deepcopy(self.__dict__ )
_a = self.vision_config.to_dict()
_a = self.qformer_config.to_dict()
_a = self.text_config.to_dict()
_a = self.__class__.model_type
return output
| 692 |
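The composite pattern above, sub-configs plus a `to_dict` that inlines them and stamps `model_type`, round-trips through plain dicts. A stripped-down sketch using only the standard library (class names are illustrative):

```python
import copy

class SubConfig:
    model_type = "sub"

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self) -> dict:
        return {**copy.deepcopy(self.__dict__), "model_type": self.model_type}

class CompositeConfig:
    model_type = "composite"

    def __init__(self, vision_config=None, text_config=None, num_query_tokens=32):
        self.vision_config = SubConfig(**(vision_config or {}))
        self.text_config = SubConfig(**(text_config or {}))
        self.num_query_tokens = num_query_tokens

    def to_dict(self) -> dict:
        # inline each sub-config so the result is a plain, JSON-serializable dict
        output = {"model_type": self.model_type, "num_query_tokens": self.num_query_tokens}
        output["vision_config"] = self.vision_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        return output

cfg = CompositeConfig(vision_config={"hidden_size": 1408})
d = cfg.to_dict()
assert d["vision_config"]["hidden_size"] == 1408
assert CompositeConfig(**{k: v for k, v in d.items() if k != "model_type"}).to_dict() == d
```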
def prefix_function( input_string :str ) -> list:
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix( input_str :str ) -> int:
    return max(prefix_function(input_str ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 | 0 |
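The classic use of the prefix function is Knuth-Morris-Pratt matching: compute it over `pattern + separator + text`, and every index where the value reaches `len(pattern)` marks an occurrence. A self-contained sketch (the prefix function is re-implemented locally so the block stands alone):

```python
def _prefix_function(s: str) -> list:
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi

def kmp_find_all(text: str, pattern: str) -> list:
    """Return the start index of every occurrence of pattern in text."""
    m = len(pattern)
    pi = _prefix_function(pattern + "\0" + text)  # '\0' must occur in neither string
    return [i - 2 * m for i in range(len(pi)) if pi[i] == m]

assert kmp_find_all("abcabcab", "abc") == [0, 3]
```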
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class a_ :
'''simple docstring'''
def __init__( self , A , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = 13
_SCREAMING_SNAKE_CASE = 7
_SCREAMING_SNAKE_CASE = 30
_SCREAMING_SNAKE_CASE = self.seq_length + self.mem_len
_SCREAMING_SNAKE_CASE = 15
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 99
_SCREAMING_SNAKE_CASE = [10, 50, 80]
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 8
_SCREAMING_SNAKE_CASE = 128
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = self.vocab_size - 1
_SCREAMING_SNAKE_CASE = 0.01
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def snake_case_( self ) -> List[Any]:
random.seed(self.seed )
tf.random.set_seed(self.seed )
def snake_case_( self , A , A , A , A ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFTransfoXLModel(__A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model(__A ).to_tuple()
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids_a, """mems""": mems_a}
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model(__A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case_( self , A , A , A , A ) -> str:
_SCREAMING_SNAKE_CASE = TFTransfoXLLMHeadModel(__A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model(__A ).to_tuple()
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model(__A ).to_tuple()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model([input_ids_a, mems_a] ).to_tuple()
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model(__A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case_( self , A , A , A , A ) -> Dict:
_SCREAMING_SNAKE_CASE = TFTransfoXLForSequenceClassification(__A )
_SCREAMING_SNAKE_CASE = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase = () if is_tf_available() else ()
UpperCamelCase = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def snake_case_( self , A , A , A , A , A ) -> int:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = TFTransfoXLModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__A , d_embed=37 )
def snake_case_( self ) -> List[Any]:
self.config_tester.run_common_tests()
def snake_case_( self ) -> List[Any]:
self.model_tester.set_seed()
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__A )
def snake_case_( self ) -> List[str]:
self.model_tester.set_seed()
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__A )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__A )
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(__A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
assert isinstance(__A , tf.keras.layers.Layer )
_SCREAMING_SNAKE_CASE = model.get_bias()
assert name is None
else:
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
assert x is None
_SCREAMING_SNAKE_CASE = model.get_bias()
assert name is None
def snake_case_( self ) -> int:
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def snake_case_( self ) -> List[Any]:
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = TFTransfoXLModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip(reason="""This model doesn\'t play well with fit() due to not returning a single loss.""" )
def snake_case_( self ) -> List[Any]:
pass
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_SCREAMING_SNAKE_CASE = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_SCREAMING_SNAKE_CASE = model.generate(__A , max_length=200 , do_sample=__A )
self.assertListEqual(output_ids[0].numpy().tolist() , __A )
| 314 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Union[str, Any]:
_lowercase = len(snake_case__ )
_lowercase = sum(snake_case__ )
_lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_lowercase = True
for i in range(1 , s + 1 ):
_lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
_lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_lowercase = s - 2 * j
break
return diff
| 67 | 0 |
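The style-context snippet above computes the minimum possible difference between the sums of two subsets of `arr`, but the obfuscated `_lowercase` targets hide the DP-table updates. Below is a deobfuscated sketch with illustrative names; note it uses the standard carry-forward `dp[i][j] = dp[i - 1][j]`, whereas the snippet carries `dp[i][j - 1]` instead.

def find_min_diff(arr):
    """Minimum difference between the sums of two subsets of `arr`."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True iff some subset of the first i elements sums to j
    dp = [[False] * (s + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip element i
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take it
    # A half-sum as close to s / 2 as possible minimizes the difference.
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            return s - 2 * j
    return s  # unreachable for a non-empty array


print(find_min_diff([1, 6, 11, 5]))  # -> 1, e.g. {1, 5, 6} vs. {11}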
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case_ : Any = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
snake_case_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 691 |
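The import file above wires `_import_structure` into transformers' `_LazyModule` (the assignment target, normally `sys.modules[__name__]`, is obfuscated) so the heavy torch/TF submodules are imported only when one of their symbols is first accessed. A minimal sketch of that pattern, deliberately simplified and not the real `_LazyModule` implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    """Simplified lazy module: resolve a symbol's submodule on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(name)
        submodule = importlib.import_module(
            f".{self._symbol_to_module[name]}", self.__name__
        )
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value

With such an object installed in `sys.modules`, `from transformers.models.xlm import XLMConfig` defers the `configuration_xlm` import until the attribute is actually touched.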
from manim import *
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait()
| 67 | 0 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ['''pixel_values''']
def __init__(self , __a = True , __a = None , __a = PILImageResampling.BICUBIC , __a = True , __a = None , __a = True , __a = 1 / 255 , __a = True , __a = IMAGENET_DEFAULT_MEAN , __a = IMAGENET_DEFAULT_STD , **__a , ) -> None:
"""simple docstring"""
super().__init__(**__A )
UpperCAmelCase__ = size if size is not None else {'shortest_edge': 224}
UpperCAmelCase__ = get_size_dict(__A , default_to_square=__A )
UpperCAmelCase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCAmelCase__ = get_size_dict(__A , param_name='crop_size' )
UpperCAmelCase__ = do_resize
UpperCAmelCase__ = size
UpperCAmelCase__ = resample
UpperCAmelCase__ = do_center_crop
UpperCAmelCase__ = crop_size
UpperCAmelCase__ = do_rescale
UpperCAmelCase__ = rescale_factor
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase__ (self , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase__ = get_size_dict(__A , default_to_square=__A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCAmelCase__ = int((256 / 224) * size['shortest_edge'] )
UpperCAmelCase__ = get_resize_output_image_size(__A , size=__A , default_to_square=__A )
UpperCAmelCase__ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
__A , size=(size_dict['height'], size_dict['width']) , resample=__A , data_format=__A , **__A )
def UpperCamelCase__ (self , __a , __a , __a = None , **__a , ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase__ = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(__A , size=(size['height'], size['width']) , data_format=__A , **__A )
def UpperCamelCase__ (self , __a , __a , __a = None , **__a , ) -> np.ndarray:
"""simple docstring"""
return rescale(__A , scale=__A , data_format=__A , **__A )
def UpperCamelCase__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
"""simple docstring"""
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def UpperCamelCase__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ = resample if resample is not None else self.resample
UpperCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ = image_std if image_std is not None else self.image_std
UpperCAmelCase__ = size if size is not None else self.size
UpperCAmelCase__ = get_size_dict(__A , default_to_square=__A )
UpperCAmelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ = get_size_dict(__A , param_name='crop_size' )
UpperCAmelCase__ = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCAmelCase__ = [to_numpy_array(__A ) for image in images]
if do_resize:
UpperCAmelCase__ = [self.resize(__A , __A , __A ) for image in images]
if do_center_crop:
UpperCAmelCase__ = [self.center_crop(__A , __A ) for image in images]
if do_rescale:
UpperCAmelCase__ = [self.rescale(__A , __A ) for image in images]
if do_normalize:
UpperCAmelCase__ = [self.normalize(__A , __A , __A ) for image in images]
UpperCAmelCase__ = [to_channel_dimension_format(__A , __A ) for image in images]
UpperCAmelCase__ = {'pixel_values': images}
return BatchFeature(data=__A , tensor_type=__A )
| 146 |
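The image processor above implements the usual torchvision-style recipe: resize the shortest edge up by 256/224, center-crop to 224x224, rescale by 1/255, then normalize. A self-contained numpy sketch of the rescale and normalize steps (the constants are the standard ImageNet statistics the class imports):

import numpy as np

IMAGENET_DEFAULT_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_DEFAULT_STD = np.array([0.229, 0.224, 0.225])

image = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)  # HWC uint8
pixels = image.astype(np.float32) * (1 / 255)                 # do_rescale
pixels = (pixels - IMAGENET_DEFAULT_MEAN) / IMAGENET_DEFAULT_STD  # do_normalize
pixels = pixels.transpose(2, 0, 1)  # HWC -> CHW, i.e. ChannelDimension.FIRST
print(pixels.shape)  # (3, 224, 224)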
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
"""simple docstring"""
def __init__( self : Dict ,__A : Any ,__A : Tuple=None ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : int="resnet50" ,__A : int=3 ,__A : List[Any]=32 ,__A : Tuple=3 ,__A : List[Any]=True ,__A : Tuple=True ,) -> Any:
_lowercase = parent
_lowercase = out_indices if out_indices is not None else [4]
_lowercase = stage_names
_lowercase = out_features
_lowercase = backbone
_lowercase = batch_size
_lowercase = image_size
_lowercase = num_channels
_lowercase = use_pretrained_backbone
_lowercase = is_training
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Dict ) -> Union[str, Any]:
_lowercase = TimmBackbone(config=__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(__A )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase = config_and_inputs
_lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
| 67 | 0 |
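The TimmBackbone test above doubles as usage documentation: the same checkpoint can be loaded through timm or through native transformers weights. A minimal sketch lifted from those calls (requires network access and the `timm` extra installed):

from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained(
    "resnet18", use_timm_backbone=True, out_indices=[1, 2, 3]
)
print(backbone.out_indices, backbone.channels)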
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
super().__init__()
self.register_modules(
vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , scheduler=__A , safety_checker=__A , feature_extractor=__A , )
def _snake_case ( self , _UpperCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__: Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__A )
def _snake_case ( self ):
self.enable_attention_slicing(__A )
@torch.no_grad()
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = 512 , _UpperCAmelCase = 512 , _UpperCAmelCase = 50 , _UpperCAmelCase = 7.5 , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = None , **_UpperCAmelCase , ):
if isinstance(__A , __A ):
lowercase__: Optional[int] = 1
elif isinstance(__A , __A ):
lowercase__: Union[str, Any] = len(__A )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__A )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(__A )}.""" )
# get prompt text embeddings
lowercase__: Dict = self.tokenizer(
__A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
lowercase__: Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__: Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__: Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase__: List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__, lowercase__, lowercase__: Tuple = text_embeddings.shape
lowercase__: List[str] = text_embeddings.repeat(1 , __A , 1 )
lowercase__: List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__: int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__: Any = 42
if negative_prompt is None:
lowercase__: List[str] = ['''''']
elif type(__A ) is not type(__A ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(__A )} !="""
F""" {type(__A )}.""" )
elif isinstance(__A , __A ):
lowercase__: Optional[int] = [negative_prompt]
elif batch_size != len(__A ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(__A )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__: List[Any] = negative_prompt
lowercase__: Dict = text_input_ids.shape[-1]
lowercase__: Union[str, Any] = self.tokenizer(
__A , padding='''max_length''' , max_length=__A , truncation=__A , return_tensors='''pt''' , )
lowercase__: Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__: str = uncond_embeddings.shape[1]
lowercase__: Optional[Any] = uncond_embeddings.repeat(__A , __A , 1 )
lowercase__: Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , __A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__: str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__: Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__: Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowercase__: Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__: Tuple = torch.randn(
__A , generator=__A , device='''cpu''' , dtype=__A ).to(self.device )
lowercase__: Optional[int] = torch.randn(__A , generator=__A , device='''cpu''' , dtype=__A ).to(
self.device )
else:
lowercase__: List[str] = torch.randn(
__A , generator=__A , device=self.device , dtype=__A )
lowercase__: Union[str, Any] = torch.randn(__A , generator=__A , device=self.device , dtype=__A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__: Optional[int] = latents_reference.to(self.device )
lowercase__: Tuple = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowercase__: int = (latents_shape[3] - latents_shape_reference[3]) // 2
lowercase__: List[Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
lowercase__: Dict = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowercase__: Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowercase__: int = 0 if dx < 0 else dx
lowercase__: Tuple = 0 if dy < 0 else dy
lowercase__: Any = max(-dx , 0 )
lowercase__: List[Any] = max(-dy , 0 )
lowercase__: Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__: Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__: Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__: Tuple = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__: Tuple = {}
if accepts_eta:
lowercase__: List[str] = eta
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
lowercase__: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__: Optional[int] = self.scheduler.scale_model_input(__A , __A )
# predict the noise residual
lowercase__: str = self.unet(__A , __A , encoder_hidden_states=__A ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__, lowercase__: List[Any] = noise_pred.chunk(2 )
lowercase__: Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__: str = self.scheduler.step(__A , __A , __A , **__A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A , __A )
lowercase__: Union[str, Any] = 1 / 0.18_215 * latents
lowercase__: Tuple = self.vae.decode(__A ).sample
lowercase__: Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__: str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowercase__: Dict = self.feature_extractor(self.numpy_to_pil(__A ) , return_tensors='''pt''' ).to(
self.device )
lowercase__, lowercase__: str = self.safety_checker(
images=__A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowercase__: Any = None
if output_type == "pil":
lowercase__: str = self.numpy_to_pil(__A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__A , nsfw_content_detected=__A )
| 586 |
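The guidance step inside the denoising loop above is standard classifier-free guidance: the unconditional and text-conditioned noise predictions from the concatenated batch are recombined as

\hat{\epsilon} = \epsilon_{\mathrm{uncond}} + s \, (\epsilon_{\mathrm{text}} - \epsilon_{\mathrm{uncond}})

where s is `guidance_scale`; at s = 1 this reduces to the purely conditional prediction, which is why the code only runs the double forward pass when `guidance_scale > 1.0`.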
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | 0 |
def __lowerCAmelCase ( A ):
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
UpperCAmelCase_ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCAmelCase_ = 1
if upper_limit > 0:
UpperCAmelCase_ = 1
# Recurrence relation: C(i) = sum(C(j) * C(i-j-1)) for j = 0 .. i-1
for i in range(2 , upper_limit + 1 ):
for j in range(snake_case__ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_a: str = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'The Catalan numbers from 0 through {N} are:')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 162 |
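The Catalan routine above fills its table with the convolution recurrence; the closed form C(n) = binom(2n, n) / (n + 1) gives an independent check. An illustrative sketch:

from math import comb


def catalan_closed_form(n):
    # The division is always exact, so integer division is safe here.
    return comb(2 * n, n) // (n + 1)


print([catalan_closed_form(i) for i in range(8)])
# -> [1, 1, 2, 5, 14, 42, 132, 429]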
snake_case = {str(digit): digit**5 for digit in range(1_0)}
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(snake_case__ ) )
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(snake_case__ ) )
if __name__ == "__main__":
print(solution())
| 67 | 0 |
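The `range(1000, 1000000)` bound in `solution` above is justified by a counting argument: a d-digit number's digit fifth-power sum is at most d * 9**5 = d * 59049, and for d >= 7 that maximum (413,343) has fewer than seven digits, so no number with seven or more digits can equal its own sum. A short check of where the bound kicks in:

for d in range(1, 9):
    # Can the largest possible digit fifth-power sum reach a d-digit number?
    print(d, 10 ** (d - 1) <= d * 9**5)  # True through d = 6, False from d = 7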
from typing import Any
import numpy as np
def _UpperCamelCase ( lowercase__ ):
return np.array_equal(snake_case__ , matrix.conjugate().T )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = v.conjugate().T
__SCREAMING_SNAKE_CASE : int = v_star.dot(snake_case__ )
assert isinstance(snake_case__ , np.ndarray )
return (v_star_dot.dot(snake_case__ )) / (v_star.dot(snake_case__ ))
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
__SCREAMING_SNAKE_CASE : Tuple = np.array([[1], [2], [3]] )
assert is_hermitian(snake_case__ ), F'''{a} is not hermitian.'''
print(rayleigh_quotient(snake_case__ , snake_case__ ) )
__SCREAMING_SNAKE_CASE : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(snake_case__ ), F'''{a} is not hermitian.'''
assert rayleigh_quotient(snake_case__ , snake_case__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 696 |
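In standard notation the helper above evaluates the Rayleigh quotient

R(M, v) = \frac{v^{*} M v}{v^{*} v}

which is guaranteed real when M is Hermitian (M = M^{*}) and is bounded by the extreme eigenvalues, \lambda_{\min} \le R(M, v) \le \lambda_{\max}. The `is_hermitian` assertions in `tests()` enforce that precondition, and the `== float(3)` comparison is a spot check of one such value.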
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers and all multiples of 3 are not prime
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> list[int]:
_lowercase = str(snake_case__ )
_lowercase = [n]
for i in range(1 , len(snake_case__ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
if len(str(snake_case__ ) ) > 3:
if not is_prime(int(str(snake_case__ )[-3:] ) ) or not is_prime(int(str(snake_case__ )[:3] ) ):
return False
return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 11 ) -> list[int]:
_lowercase = []
_lowercase = 13
while len(snake_case__ ) != count:
if validate(snake_case__ ):
_lowercase = list_truncated_nums(snake_case__ )
if all(is_prime(snake_case__ ) for i in list_nums ):
list_truncated_primes.append(snake_case__ )
num += 2
return list_truncated_primes
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""") | 67 | 0 |
A_: str = {str(digit): digit**5 for digit in range(10)}
def __lowerCAmelCase ( _A ):
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(snake_case__ ) )
def __lowerCAmelCase ( ):
"""simple docstring"""
return sum(
number
for number in range(1_000 ,1_000_000 )
if number == digits_fifth_powers_sum(snake_case__ ) )
if __name__ == "__main__":
print(solution())
| 398 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
"""simple docstring"""
def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
_lowercase = parent
_lowercase = batch_size
_lowercase = encoder_seq_length
_lowercase = decoder_seq_length
# For common tests
_lowercase = self.decoder_seq_length
_lowercase = is_training
_lowercase = use_attention_mask
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = d_ff
_lowercase = relative_attention_num_buckets
_lowercase = dropout_rate
_lowercase = initializer_factor
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = decoder_start_token_id
_lowercase = None
_lowercase = decoder_layers
def __UpperCAmelCase ( self : Dict ) -> Dict:
return TaConfig.from_pretrained('google/umt5-base' )
def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
if attention_mask is None:
_lowercase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowercase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
if decoder_head_mask is None:
_lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
if cross_attn_head_mask is None:
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_attention_heads ,device=__A )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
_lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
_lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowercase = input_ids.clamp(self.pad_token_id + 1 )
_lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowercase = self.get_config()
_lowercase = config.num_attention_heads
_lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
return config, input_dict
def __UpperCAmelCase ( self : Dict ) -> str:
_lowercase , _lowercase = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return TaConfig(
vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Dict ) -> Any:
return TaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
_lowercase = UMTaModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(
input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
_lowercase = model(input_ids=__A ,decoder_input_ids=__A )
_lowercase = result.last_hidden_state
_lowercase = result.past_key_values
_lowercase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__A ) ,config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) ,4 )
def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
_lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
# first forward pass
_lowercase = model(__A ,use_cache=__A )
_lowercase = model(__A )
_lowercase = model(__A ,use_cache=__A )
self.parent.assertTrue(len(__A ) == len(__A ) )
self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
_lowercase , _lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append to next input_ids and
_lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowercase = model(__A )['last_hidden_state']
_lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
# select random slice
_lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
_lowercase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
_lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
_lowercase = model(**__A )['last_hidden_state']
self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
_lowercase = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def __UpperCAmelCase ( self : int ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def __UpperCAmelCase ( self : List[Any] ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__A )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_lowercase = self.model_tester.prepare_config_and_inputs()
_lowercase = config_and_inputs[0]
_lowercase = UMTaForConditionalGeneration(__A ).eval()
model.to(__A )
_lowercase = {
'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
}
for attn_name, (name, mask) in zip(__A ,head_masking.items() ):
_lowercase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_lowercase = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=__A )
_lowercase = model.generate(
config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def __UpperCAmelCase ( self : str ) -> List[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def __UpperCAmelCase ( self : int ) -> List[str]:
_lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
_lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
_lowercase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
# fmt: off
_lowercase = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__A ,__A )
_lowercase = model.generate(input_ids.to(__A ) )
_lowercase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_lowercase = tokenizer.batch_decode(__A )
self.assertEqual(__A ,__A )
| 67 | 0 |
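The long comment inside `prepare_config_and_inputs` in the UMT5 tester above explains why pad tokens are clamped out of random inputs; the clamp itself is a torch one-liner. A self-contained illustration:

import torch

ids = torch.tensor([[0, 3, 0, 7]])
print(ids.clamp(0 + 1))  # pad id 0 bumped to 1 -> tensor([[1, 3, 1, 7]])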
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the message shifted back by every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                # Non-letters (spaces, punctuation) pass through unchanged
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}')


def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
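
# Illustrative run (assumed input/output, not part of the original snippet):
#   Encrypted message: GUVF VF FRPERG
#   ...
#   Decryption using Key #13: THIS IS SECRET
#   ...
# Printing all 26 candidate shifts and eyeballing the output is sufficient here
# because the Caesar key space is tiny.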
| 278 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            'enabled': True,
            'processes_per_host': 8,
        }
        smp_options = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version='py36',
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile) | 67 | 0 |
def pancake_sort(arr):
    """Sort a list by repeatedly flipping its largest unsorted element to the front, then into place."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi: the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix: the maximum lands at position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
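
# Illustrative trace (assumed input, not part of the original script):
#   pancake_sort([3, 1, 5, 2]) -> [1, 2, 3, 5]
# Each outer pass flips the current maximum to index 0, then flips the whole
# unsorted prefix so that maximum lands at its final position.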
| 73 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = '''blenderbot-small'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
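

# Illustrative usage (assumed driver code, not part of the original module): a
# scaled-down config for quick experiments could be built as
#   config = BlenderbotSmallConfig(encoder_layers=2, decoder_layers=2, d_model=64)
# and passed to any BlenderbotSmall model class.
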
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework )
        decoder_inputs = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1 )
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework) )
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs
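
    # Illustrative use of generate_dummy_inputs (assumed driver code, not part of
    # this file): given a loaded config and tokenizer, export-ready dummy inputs
    # could be produced with
    #   onnx_config = BlenderbotSmallOnnxConfig(config, task='default')
    #   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)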
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t ) | 67 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_blenderbot_small''': [
        '''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BlenderbotSmallConfig''',
        '''BlenderbotSmallOnnxConfig''',
    ],
    '''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_blenderbot_small_fast'''] = ['''BlenderbotSmallTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blenderbot_small'''] = [
        '''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BlenderbotSmallForCausalLM''',
        '''BlenderbotSmallForConditionalGeneration''',
        '''BlenderbotSmallModel''',
        '''BlenderbotSmallPreTrainedModel''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blenderbot_small'''] = [
        '''TFBlenderbotSmallForConditionalGeneration''',
        '''TFBlenderbotSmallModel''',
        '''TFBlenderbotSmallPreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_blenderbot_small'''] = [
        '''FlaxBlenderbotSmallForConditionalGeneration''',
        '''FlaxBlenderbotSmallModel''',
        '''FlaxBlenderbotSmallPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
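
# Note (explanatory, not part of the original module): with this _LazyModule
# pattern the torch/TF/Flax submodules listed in _import_structure are only
# imported on first attribute access, so e.g. `BlenderbotSmallModel` is resolved
# lazily when user code first touches it.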
| 373 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        # A tiny UNet so the test runs quickly, even on CPU
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'))
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
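

# Note (explanatory, not part of the original test): both pipeline calls above
# reseed torch.manual_seed(0), so the dataclass output (.images) and the tuple
# output ([0]) are compared against the same deterministic sample; only a 3x3
# corner slice is checked to keep the assertion cheap.
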
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 67 | 0 |