| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81-54k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
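# Usage sketch (hypothetical numbers): pass the unknown quantity as 0. For two
# 1 cm^2 plates held 1 micrometre apart, the returned force is roughly 1.3e-7 N:
#   >>> casimir_force(force=0, area=1e-4, distance=1e-6)
#   {'force': 1.30...e-07}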
---
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
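# Usage sketch: adding 1 + 1 with carry-in 1 gives sum 1 and carry 1, so an ideal
# (noise-free) simulation should put all 1000 shots on the classical state '11':
#   quantum_full_adder(1, 1, 1)  # -> {'11': 1000}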
---
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
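# Usage sketch with made-up IDs: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z',
# so "12345678Z" validates while any other trailing letter does not:
#   >>> is_spain_national_id("12345678Z")
#   True
#   >>> is_spain_national_id("12345678A")
#   False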
---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
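# Usage sketch: `attribute_map` lets the generic Hugging Face attribute names
# resolve to the GPT-2 style ones, e.g.:
#   config = GPTBigCodeConfig(n_embd=1024, n_layer=20)
#   config.hidden_size        # -> 1024 (alias of n_embd)
#   config.num_hidden_layers  # -> 20 (alias of n_layer)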
---
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
---
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
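# Example invocation via python-fire (hypothetical file names):
#   python rouge_cli.py predictions.txt references.txt --save_path=rouge_metrics.json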
---
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
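# Typical invocation, as a subcommand of the `accelerate` CLI:
#   accelerate test --config_file=path/to/default_config.yaml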
---
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
---
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
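# Illustration: `get_pairs` feeds the BPE merge loop below with adjacent-symbol pairs:
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}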
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text: str) -> List[str]:
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
---
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
            by [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
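# Composition sketch (hypothetical sub-configs): a RagConfig is normally assembled
# from two existing configs rather than instantiated from scratch:
#   from transformers import AutoConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       AutoConfig.for_model("dpr"), AutoConfig.for_model("bart"), n_docs=5
#   )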
---
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
---
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
---
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
---
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Dict = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(__snake_case )
UpperCAmelCase_ : List[str] = list(__snake_case )
UpperCAmelCase_ : Any = list(__snake_case )
UpperCAmelCase_ : Any = conv_bias
UpperCAmelCase_ : List[str] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[Any] = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : int = num_clusters
UpperCAmelCase_ : int = do_stable_layer_norm
UpperCAmelCase_ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[Any] = mask_time_prob
UpperCAmelCase_ : str = mask_time_length
UpperCAmelCase_ : Any = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : int = num_codevector_groups
UpperCAmelCase_ : List[str] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : List[str] = num_negatives
UpperCAmelCase_ : Any = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Any = ctc_loss_reduction
UpperCAmelCase_ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Union[str, Any] = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
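# A small standalone sketch of what the property above computes: the product of
# the convolutional strides is the feature encoder's overall waveform-to-frame
# downsampling factor. The stride tuple below is the default from this config's
# signature; the snippet is illustrative, not part of the model code.
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
downsampling_factor = functools.reduce(operator.mul, default_conv_stride, 1)
assert downsampling_factor == 320  # one output frame per 320 input samples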
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__snake_case ):
UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase_ : str = FlaxAutoModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__snake_case ):
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase_ : Tuple = FlaxAutoModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(__snake_case )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(__snake_case )
UpperCAmelCase_ : Tuple = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__snake_case : Optional[Any] ):
return model(**__snake_case )
eval(**__snake_case ).block_until_ready()
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(__snake_case )
UpperCAmelCase_ : Union[str, Any] = FlaxRobertaModel.from_pretrained(__snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__snake_case : List[str] ):
return model(**__snake_case )
eval(**__snake_case ).block_until_ready()
def _lowerCamelCase ( self : int ):
'''simple docstring'''
with self.assertRaisesRegex(
__snake_case , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : Any = FlaxAutoModel.from_pretrained('''bert-base''' )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(
__snake_case , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : Tuple = FlaxAutoModel.from_pretrained(__snake_case , revision='''aaaaaa''' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
__snake_case , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
UpperCAmelCase_ : Optional[Any] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(__snake_case , '''Use `from_pt=True` to load this model''' ):
UpperCAmelCase_ : Optional[int] = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
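# The jitted-eval tests above follow the usual JAX pattern: wrap the forward
# pass in `jax.jit` and call `.block_until_ready()` on the output so the
# asynchronously dispatched computation actually finishes inside the test.
# A minimal sketch of the same pattern on a plain function, independent of
# any model:
import jax
import jax.numpy as jnp


@jax.jit
def _double(x):
    return x * 2


_result = _double(jnp.arange(4))
_result.block_until_ready()  # forces execution; JAX dispatch is asynchronous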
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case_ ( __lowercase , __lowercase ):
# Load checkpoint
UpperCAmelCase_ : Tuple = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : Optional[int] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCAmelCase_ : str = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCAmelCase_ : Tuple = v
else:
UpperCAmelCase_ : Union[str, Any] = v
UpperCAmelCase_ : int = chkpt['''params''']
UpperCAmelCase_ : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__lowercase , (torch.FloatTensor, numpy.ndarray) )}
UpperCAmelCase_ : int = chkpt['''dico_word2id''']
UpperCAmelCase_ : List[Any] = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 1_3 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(__lowercase , __lowercase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
print(F'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
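# Example invocation of the conversion script above (the script file name and
# both paths are placeholders, not real artifacts):
#
#   python convert_xlm_original_checkpoint.py \
#       --xlm_checkpoint_path /path/to/xlm_checkpoint.pth \
#       --pytorch_dump_folder_path /path/to/output_dir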
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self : int ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Any = 3
UpperCAmelCase_ : List[str] = (32, 32)
UpperCAmelCase_ : Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__snake_case )
return image
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(__snake_case )
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
def extract(*__snake_case : Any , **__snake_case : List[str] ):
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = torch.ones([0] )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : List[Any] ):
'''simple docstring'''
self.pixel_values.to(__snake_case )
return self
return Out()
return extract
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.dummy_cond_unet
UpperCAmelCase_ : Tuple = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
UpperCAmelCase_ : int = self.dummy_vae
UpperCAmelCase_ : Dict = self.dummy_text_encoder
UpperCAmelCase_ : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# assemble the pipeline from the dummy components (a DDIM scheduler here)
UpperCAmelCase_ : int = StableDiffusionPipeline(
unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , )
UpperCAmelCase_ : int = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : int = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ : Optional[int] = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ : Any = sd_pipe([prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : str = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__snake_case , )[0]
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Optional[int] = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : int = PNDMScheduler(skip_prk_steps=__snake_case )
UpperCAmelCase_ : List[str] = self.dummy_vae
UpperCAmelCase_ : Dict = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Optional[Any] = StableDiffusionPipeline(
unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , )
UpperCAmelCase_ : Dict = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Dict = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ : List[Any] = sd_pipe([prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Tuple = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__snake_case , )[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : List[Any] = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(pipe.scheduler , __snake_case )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[str] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__snake_case )
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__snake_case )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : int = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.dummy_cond_unet
UpperCAmelCase_ : Optional[int] = PNDMScheduler(skip_prk_steps=__snake_case )
UpperCAmelCase_ : Tuple = self.dummy_vae
UpperCAmelCase_ : str = self.dummy_text_encoder
UpperCAmelCase_ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
UpperCAmelCase_ : Tuple = unet.half()
UpperCAmelCase_ : Dict = vae.half()
UpperCAmelCase_ : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Tuple = StableDiffusionPipeline(
unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , )
UpperCAmelCase_ : Optional[Any] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Union[str, Any] = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__snake_case )
UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : List[Any] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Any = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
UpperCAmelCase_ : Optional[Any] = 4_003_660_346
UpperCAmelCase_ : Optional[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : List[Any] = torch.manual_seed(__snake_case )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
UpperCAmelCase_ : List[str] = torch.manual_seed(__snake_case )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__snake_case )
UpperCAmelCase_ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : Optional[Any] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Optional[Any] = '''padme amidala taking a bath artwork, safe for work, no nudity'''
UpperCAmelCase_ : str = 2_734_971_755
UpperCAmelCase_ : Tuple = 7
UpperCAmelCase_ : Any = torch.manual_seed(__snake_case )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase_ : int = output.images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
UpperCAmelCase_ : Optional[int] = torch.manual_seed(__snake_case )
UpperCAmelCase_ : Union[str, Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : int = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
UpperCAmelCase_ : int = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ : Union[str, Any] = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
UpperCAmelCase_ : Optional[Any] = 1_044_355_234
UpperCAmelCase_ : int = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(__snake_case )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
UpperCAmelCase_ : Dict = torch.manual_seed(__snake_case )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] , generator=__snake_case , guidance_scale=__snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
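# All of the assertions above share one recipe: slice a 3x3 corner from the
# last channel of the generated image and compare it to a stored reference
# slice with an absolute tolerance. A self-contained sketch of that check,
# using random data in place of a real pipeline output (the shape matches the
# fast tests above):
import numpy as np

demo_image = np.random.RandomState(0).rand(1, 64, 64, 3)
demo_slice = demo_image[0, -3:, -3:, -1]   # 3x3 patch of the last channel
demo_expected = demo_slice.copy()          # stand-in for a stored reference
assert demo_image.shape == (1, 64, 64, 3)
assert np.abs(demo_slice.flatten() - demo_expected.flatten()).max() < 1E-2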
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Optional[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = ['input_ids', 'attention_mask']
A_ : int = TaTokenizer
A_ : List[int] = []
def __init__( self : Union[str, Any] , __snake_case : Tuple=None , __snake_case : List[Any]=None , __snake_case : int="</s>" , __snake_case : List[Any]="<unk>" , __snake_case : Dict="<pad>" , __snake_case : Tuple=100 , __snake_case : int=None , **__snake_case : Any , ):
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ : Optional[int] = [f'''<extra_id_{i}>''' for i in range(__snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ : Any = len(set(filter(lambda __snake_case : bool('''extra_id_''' in str(__snake_case ) ) , __snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
__snake_case , tokenizer_file=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : List[str] = bool(self.vocab_file )
UpperCAmelCase_ : Union[str, Any] = extra_ids
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ : str = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __snake_case , )
return max_model_length
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : str = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def _lowerCamelCase ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _lowerCamelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : int = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return list(
set(filter(lambda __snake_case : bool(re.search(R'''<extra_id_\d+>''' , __snake_case ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [self.convert_tokens_to_ids(__snake_case ) for token in self.get_sentinel_tokens()]
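# The input-building methods above mirror the slow T5 tokenizer: an EOS id is
# appended to every sequence, and token type ids are all zeros because T5 does
# not use them. A sketch with made-up token ids (eos_token_id is 1 in T5):
eos_token_id = 1
ids_a = [37, 423, 215]
ids_b = [99, 12]
single = ids_a + [eos_token_id]                           # "A </s>"
pair = ids_a + [eos_token_id] + ids_b + [eos_token_id]    # "A </s> B </s>"
token_type_ids = [0] * len(pair)                          # always zeros
assert single == [37, 423, 215, 1] and len(token_type_ids) == 7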
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : float , **__snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = feature_size
UpperCAmelCase_ : Any = sampling_rate
UpperCAmelCase_ : Optional[Any] = padding_value
UpperCAmelCase_ : List[Any] = kwargs.pop('''padding_side''' , '''right''' )
UpperCAmelCase_ : Optional[Any] = kwargs.pop('''return_attention_mask''' , __snake_case )
super().__init__(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case : Union[bool, str, PaddingStrategy] = True , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
# If we have a list of dicts, let's convert it into a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
UpperCAmelCase_ : List[Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : List[str] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
UpperCAmelCase_ : Any = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCAmelCase_ : List[Any] = required_input[0]
if isinstance(__snake_case , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
UpperCAmelCase_ : Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
UpperCAmelCase_ : List[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
UpperCAmelCase_ : List[Any] = '''tf'''
elif is_torch_tensor(__snake_case ):
UpperCAmelCase_ : Optional[Any] = '''pt'''
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
UpperCAmelCase_ : str = '''np'''
else:
raise ValueError(
f'''type of {first_element} unknown: {type(__snake_case )}. '''
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
UpperCAmelCase_ : List[Any] = to_numpy(__snake_case )
else:
UpperCAmelCase_ : Union[str, Any] = [to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCAmelCase_ : Any = self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : Any = len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
UpperCAmelCase_ : int = []
for i in range(__snake_case ):
UpperCAmelCase_ : List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCAmelCase_ : Tuple = self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCAmelCase_ : List[str] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCAmelCase_ : Any = PaddingStrategy.MAX_LENGTH
UpperCAmelCase_ : Optional[Any] = {}
for i in range(__snake_case ):
# padding
UpperCAmelCase_ : Dict = self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
UpperCAmelCase_ : Optional[Any] = []
if value.dtype is np.dtype(np.floataa ):
UpperCAmelCase_ : Dict = value.astype(np.floataa )
batch_outputs[key].append(__snake_case )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCAmelCase_ : List[Any] = len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Tuple = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
UpperCAmelCase_ : str = np.ones(len(__snake_case ) , dtype=np.intaa )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
UpperCAmelCase_ : str = np.pad(
processed_features['''attention_mask'''] , (0, difference) )
UpperCAmelCase_ : Optional[Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCAmelCase_ : Optional[int] = np.pad(
__snake_case , __snake_case , '''constant''' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCAmelCase_ : List[str] = np.pad(
processed_features['''attention_mask'''] , (difference, 0) )
UpperCAmelCase_ : List[str] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCAmelCase_ : int = np.pad(
__snake_case , __snake_case , '''constant''' , constant_values=self.padding_value )
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
def _lowerCamelCase ( self : List[str] , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
UpperCAmelCase_ : List[Any] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Union[str, Any] = len(__snake_case ) > max_length
if needs_to_be_truncated:
UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase_ : Any = processed_features['''attention_mask'''][:max_length]
return processed_features
def _lowerCamelCase ( self : str , __snake_case : Dict=False , __snake_case : Union[str, Any]=None ):
'''simple docstring'''
# Get padding strategy
if padding is not False:
if padding is True:
UpperCAmelCase_ : int = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : str = PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : List[Any] = padding
else:
UpperCAmelCase_ : List[str] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
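# As the comments inside `pad` note, the method is shaped so it can serve as a
# DataLoader collate function: it accepts a list of per-example feature dicts
# and returns one padded batch. A hedged sketch (`feature_extractor` and
# `dataset` stand for any concrete extractor instance and dataset; the names
# are illustrative only):
#
#   from torch.utils.data import DataLoader
#
#   loader = DataLoader(
#       dataset,
#       batch_size=8,
#       collate_fn=lambda batch: feature_extractor.pad(
#           batch, padding=True, return_tensors="pt"
#       ),
#   )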
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : str = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
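# Replacing the entry in `sys.modules` is the standard transformers trick for
# deferring heavy imports: attribute lookups on the module are resolved from
# the import structure only on first access. A toy version of the same idea,
# independent of transformers:
import types


class _ToyLazyModule(types.ModuleType):
    """Resolve attributes lazily from a name -> value registry."""

    def __init__(self, name, registry):
        super().__init__(name)
        self._registry = registry

    def __getattr__(self, item):
        if item in self._registry:
            return self._registry[item]
        raise AttributeError(item)


_toy = _ToyLazyModule("toy", {"answer": 42})
assert _toy.answer == 42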
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
def update_area_of_max_square(__lowercase , __lowercase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
UpperCAmelCase_ : int = update_area_of_max_square(__lowercase , col + 1 )
UpperCAmelCase_ : Dict = update_area_of_max_square(row + 1 , col + 1 )
UpperCAmelCase_ : int = update_area_of_max_square(row + 1 , __lowercase )
if mat[row][col]:
UpperCAmelCase_ : List[str] = 1 + min([right, diagonal, down] )
UpperCAmelCase_ : str = max(largest_square_area[0] , __lowercase )
return sub_problem_sol
else:
return 0
UpperCAmelCase_ : List[str] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
def update_area_of_max_square_using_dp_array(
__lowercase , __lowercase , __lowercase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
UpperCAmelCase_ : str = update_area_of_max_square_using_dp_array(__lowercase , col + 1 , __lowercase )
UpperCAmelCase_ : List[Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , __lowercase )
UpperCAmelCase_ : List[Any] = update_area_of_max_square_using_dp_array(row + 1 , __lowercase , __lowercase )
if mat[row][col]:
UpperCAmelCase_ : Dict = 1 + min([right, diagonal, down] )
UpperCAmelCase_ : List[Any] = max(largest_square_area[0] , __lowercase )
UpperCAmelCase_ : List[str] = sub_problem_sol
return sub_problem_sol
else:
return 0
UpperCAmelCase_ : List[str] = [0]
UpperCAmelCase_ : int = [[-1] * cols for _ in range(__lowercase )]
update_area_of_max_square_using_dp_array(0 , 0 , __lowercase )
return largest_square_area[0]
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = [[0] * (cols + 1) for _ in range(rows + 1 )]
UpperCAmelCase_ : Tuple = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
UpperCAmelCase_ : List[str] = dp_array[row][col + 1]
UpperCAmelCase_ : int = dp_array[row + 1][col + 1]
UpperCAmelCase_ : int = dp_array[row + 1][col]
if mat[row][col] == 1:
UpperCAmelCase_ : Any = 1 + min(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ : str = max(dp_array[row][col] , __lowercase )
else:
UpperCAmelCase_ : Tuple = 0
return largest_square_area
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : List[str] = [0] * (cols + 1)
UpperCAmelCase_ : Optional[int] = [0] * (cols + 1)
UpperCAmelCase_ : Dict = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
UpperCAmelCase_ : int = current_row[col + 1]
UpperCAmelCase_ : List[str] = next_row[col + 1]
UpperCAmelCase_ : int = next_row[col]
if mat[row][col] == 1:
UpperCAmelCase_ : Optional[int] = 1 + min(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ : Dict = max(current_row[col] , __lowercase )
else:
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : Optional[int] = current_row.copy()  # snapshot, not an alias: aliasing would let this row's writes corrupt the diagonal reads
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
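# Note on the four implementations above: despite the "area" in their names,
# the tracked value is the side length of the largest all-ones square (the
# 2x2 example prints 2, not 4). A worked trace of the bottom-up recurrence
# dp[r][c] = 1 + min(right, diagonal, down) when mat[r][c] == 1:
#
#   mat = [[1, 1, 0],        dp = [[2, 1, 0],
#          [1, 1, 1],              [1, 2, 1],
#          [0, 1, 1]]              [0, 1, 1]]   -> result 2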
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['image_processor', 'tokenizer']
A_ : int = 'LayoutLMv2ImageProcessor'
A_ : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : List[str]=None , **__snake_case : Optional[int] ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
UpperCAmelCase_ : List[Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase_ : Tuple = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Any = features['''words''']
UpperCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
UpperCAmelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[int] = self.get_overflowing_images(__snake_case , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase_ : List[Any] = images
return encoded_inputs
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase_ : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__snake_case )} and {len(__snake_case )}''' )
return images_with_overflow
def _lowerCamelCase ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : str , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
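# A hedged usage sketch for the processor above. With `apply_ocr=True` on the
# image processor (the default), only an image is required and the words and
# boxes come from OCR. The image below is a synthetic placeholder, and
# `processor` stands for a constructed instance of the class above:
#
#   import numpy as np
#   from PIL import Image
#
#   image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
#   encoding = processor(image, return_tensors="pt")
#   # -> input_ids, bbox, attention_mask, image (see model_input_names above)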
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests often fail with an OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCamelCase : int = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ):
if attention_mask is None:
UpperCAmelCase_ : int = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase_ : List[str] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase_ : Dict = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
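# What `shift_tokens_right` is expected to produce (the shift test further
# below checks exactly this): every token moves one position to the right,
# position 0 becomes the decoder start token, and one trailing pad disappears.
# A numpy sketch with made-up ids (pad_token_id=1, decoder_start_token_id=2):
import numpy as np

demo_input_ids = np.array([[71, 82, 18, 33, 2, 1, 1]])
demo_shifted = np.roll(demo_input_ids, 1, axis=-1)
demo_shifted[:, 0] = 2  # decoder start token id
# demo_shifted == [[2, 71, 82, 18, 33, 2, 1]]: same shape, one fewer pad token
assert demo_shifted.shape == demo_input_ids.shape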
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Any , __snake_case : Dict , __snake_case : str=13 , __snake_case : List[str]=7 , __snake_case : str=True , __snake_case : List[str]=False , __snake_case : str=99 , __snake_case : Optional[Any]=16 , __snake_case : Tuple=2 , __snake_case : Any=4 , __snake_case : Optional[int]=4 , __snake_case : Optional[int]="gelu" , __snake_case : Union[str, Any]=0.1 , __snake_case : List[Any]=0.1 , __snake_case : List[str]=32 , __snake_case : Tuple=2 , __snake_case : int=1 , __snake_case : Dict=0 , __snake_case : Dict=0.02 , ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Optional[Any] = seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : List[str] = use_labels
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Tuple = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : int = eos_token_id
UpperCAmelCase_ : Optional[Any] = pad_token_id
UpperCAmelCase_ : Union[str, Any] = bos_token_id
UpperCAmelCase_ : Optional[int] = initializer_range
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase_ : Optional[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase_ : int = shift_tokens_right(__snake_case , 1 , 2 )
UpperCAmelCase_ : List[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__snake_case , )
UpperCAmelCase_ : Union[str, Any] = prepare_blenderbot_inputs_dict(__snake_case , __snake_case , __snake_case )
return config, inputs_dict
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCamelCase ( self : Optional[int] , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Dict = 20
UpperCAmelCase_ : Optional[int] = model_class_name(__snake_case )
UpperCAmelCase_ : Tuple = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase_ : str = model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case )
UpperCAmelCase_ : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
UpperCAmelCase_ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : List[str] = model.decode(
decoder_input_ids[:, :-1] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=__snake_case , decoder_position_ids=__snake_case , )
UpperCAmelCase_ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase_ : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__snake_case , )
UpperCAmelCase_ : List[str] = model.decode(__snake_case , __snake_case )
UpperCAmelCase_ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def _lowerCamelCase ( self : Dict , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = 20
UpperCAmelCase_ : List[Any] = model_class_name(__snake_case )
UpperCAmelCase_ : Any = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase_ : Dict = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase_ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case )
UpperCAmelCase_ : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : List[str] = model.decode(
decoder_input_ids[:, :-1] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=__snake_case , decoder_position_ids=__snake_case , )
UpperCAmelCase_ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase_ : List[Any] = model.decode(
decoder_input_ids[:, -1:] , __snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__snake_case , decoder_position_ids=__snake_case , )
UpperCAmelCase_ : Optional[int] = model.decode(__snake_case , __snake_case , decoder_attention_mask=__snake_case )
UpperCAmelCase_ : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
A_ : List[str] = 9_9
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase_ : List[Any] = input_ids.shape[0]
UpperCAmelCase_ : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._get_config_and_data()
UpperCAmelCase_ : Dict = FlaxBlenderbotSmallForConditionalGeneration(__snake_case )
UpperCAmelCase_ : int = lm_model(input_ids=__snake_case )
UpperCAmelCase_ : List[str] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , __snake_case )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
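# Illustrative sketch (not the library implementation): the invariant the test
# above exercises is "prepend the decoder start token and drop the last
# position". A plain-numpy re-derivation under that assumption (the real
# helper additionally remaps label padding, which is omitted here):
def _shift_tokens_right_sketch(input_ids, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return shifted


# e.g. _shift_tokens_right_sketch(np.array([[71, 82, 2, 1]]), 2) -> [[2, 71, 82, 2]]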
@require_flax
class lowerCAmelCase__( snake_case__ , unittest.TestCase , snake_case__ ):
'''simple docstring'''
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : List[str] = self._prepare_for_class(__snake_case , __snake_case )
UpperCAmelCase_ : int = model_class(__snake_case )
@jax.jit
def encode_jitted(__snake_case : Union[str, Any] , __snake_case : Dict=None , **__snake_case : List[Any] ):
return model.encode(input_ids=__snake_case , attention_mask=__snake_case )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase_ : Dict = encode_jitted(**__snake_case ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase_ : Optional[Any] = encode_jitted(**__snake_case ).to_tuple()
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
for jitted_output, output in zip(__snake_case , __snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : Optional[int] = model_class(__snake_case )
UpperCAmelCase_ : Any = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
UpperCAmelCase_ : Any = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Union[str, Any] ):
return model.decode(
decoder_input_ids=__snake_case , decoder_attention_mask=__snake_case , encoder_outputs=__snake_case , )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase_ : int = decode_jitted(**__snake_case ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase_ : List[Any] = decode_jitted(**__snake_case ).to_tuple()
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
for jitted_output, output in zip(__snake_case , __snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
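# Hedged usage sketch (illustrative, outside the test suite): greedy generation
# with the public 90M checkpoint through the standard transformers API.
if __name__ == "__main__":
    from transformers import BlenderbotSmallTokenizer

    tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
    input_ids = tok(["sample text"], return_tensors="np").input_ids
    sequences = model.generate(input_ids, max_length=32).sequences
    print(tok.batch_decode(sequences, skip_special_tokens=True))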
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
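# Usage sketch (illustrative): 20 cameras orbiting the origin at 64x64
# resolution; each pixel yields an (origin, direction) pair stacked on dim 2.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays  # [1, 20 * 64 * 64, 2, 3]
    ray_origins, ray_directions = rays[..., 0, :], rays[..., 1, :]
    print(ray_origins.shape, ray_directions.shape)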
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowerCAmelCase__( unittest.TestCase , snake_case__ ):
'''simple docstring'''
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
        self.tool = load_tool('''text-to-speech''' )
self.tool.setup()
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
        result = self.tool(text='''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
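# Hedged usage sketch (illustrative): persisting the tool output to disk.
# SpeechT5's HiFi-GAN vocoder emits 16 kHz audio; `soundfile` is an assumed
# extra dependency, not imported by this test module.
#
#   import soundfile as sf
#   tool = load_tool("text-to-speech")
#   tool.setup()
#   audio = tool("hello world").to_raw()
#   sf.write("speech.wav", audio.numpy(), samplerate=16000)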
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
            '''f1''': f1_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
# when stable
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
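# Example invocation (illustrative; generic flags such as --data_dir,
# --output_dir and --model_name_or_path come from add_generic_args /
# BaseTransformer, the rest from add_model_specific_args above):
#
#   python run_ner.py \
#       --data_dir ./conll2003 \
#       --labels ./labels.txt \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./ner-out \
#       --max_seq_length 128 \
#       --do_train --do_predict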
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "EncoderDecoderConfig":
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
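# Hedged usage sketch (illustrative): composing a config from two pretrained
# sub-configs via the classmethod above.
#
#   from transformers import AutoConfig
#   enc = AutoConfig.from_pretrained("bert-base-uncased")
#   dec = AutoConfig.from_pretrained("bert-base-uncased")
#   cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention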
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def snake_case_ ( ):
    n = 10
UpperCAmelCase_ : Tuple = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
UpperCAmelCase_ : Dict = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
            '''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
            '''id''': list(range(n ) ),
} , features=__lowercase , )
return dataset
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Any = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=__lowercase )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Dict = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
UpperCAmelCase_ : str = FILE_CONTENT
with open(__lowercase , '''w''' ) as f:
f.write(__lowercase )
return filename
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
    import bz2
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
UpperCAmelCase_ : Dict = bytes(__lowercase , '''utf-8''' )
    with bz2.open(__lowercase , '''wb''' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
UpperCAmelCase_ : Optional[int] = bytes(__lowercase , '''utf-8''' )
with gzip.open(__lowercase , '''wb''' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
UpperCAmelCase_ : Tuple = bytes(__lowercase , '''utf-8''' )
        with lz4.frame.open(__lowercase , '''wb''' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
UpperCAmelCase_ : Dict = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
        with py7zr.SevenZipFile(__lowercase , '''w''' ) as archive:
archive.write(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase ):
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(__lowercase , '''w''' ) as f:
f.add(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
import lzma
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
UpperCAmelCase_ : List[str] = bytes(__lowercase , '''utf-8''' )
with lzma.open(__lowercase , '''wb''' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase ):
import zipfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(__lowercase , '''w''' ) as f:
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
UpperCAmelCase_ : List[Any] = bytes(__lowercase , '''utf-8''' )
with zstd.open(__lowercase , '''wb''' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
UpperCAmelCase_ : Tuple = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(__lowercase , '''w''' ) as f:
f.write(__lowercase )
return filename
DATA = [
    {'col_1': '0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': '1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': '2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
    {'col_1': '4', 'col_2': 4, 'col_3': 4.0},
    {'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
    'col_1': ['0', '1', '2', '3'],
    'col_2': [0, 1, 2, 3],
    'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {'col_3': 0.0, 'col_1': '0', 'col_2': 0},
    {'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
    {'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='''session''' )
def snake_case_ ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Dict = datasets.Dataset.from_dict(__lowercase )
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=__lowercase )
return path
@pytest.fixture(scope='''session''' )
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(__lowercase , '''w''' , newline='''''' ) as f:
UpperCAmelCase_ : Any = csv.DictWriter(__lowercase , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(__lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(__lowercase , '''w''' , newline='''''' ) as f:
UpperCAmelCase_ : Union[str, Any] = csv.DictWriter(__lowercase , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(__lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase ):
    import bz2
UpperCAmelCase_ : int = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(__lowercase , '''rb''' ) as f:
UpperCAmelCase_ : Optional[int] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__lowercase , '''wb''' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(__lowercase , '''w''' ) as f:
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(__lowercase , '''w''' ) as f:
f.write(__lowercase , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(__lowercase , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(__lowercase , '''w''' ) as f:
f.write(__lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowercase ) ) )
f.write(__lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
UpperCAmelCase_ : Tuple = pa.schema(
{
'''col_1''': pa.string(),
            '''col_2''': pa.int64(),
            '''col_3''': pa.float64(),
} )
with open(__lowercase , '''wb''' ) as f:
UpperCAmelCase_ : Union[str, Any] = pq.ParquetWriter(__lowercase , schema=__lowercase )
UpperCAmelCase_ : List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__lowercase ) )] for k in DATA[0]} , schema=__lowercase )
writer.write_table(__lowercase )
writer.close()
return path
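# Quick sanity sketch (illustrative; `parquet_path` is the assumed fixture
# name): the parquet file written above round-trips through pyarrow.
#
#   table = pq.read_table(parquet_path)
#   assert table.num_rows == len(DATA)
#   assert table.column_names == ["col_1", "col_2", "col_3"]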
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
UpperCAmelCase_ : Optional[Any] = {'''data''': DATA}
with open(__lowercase , '''w''' ) as f:
json.dump(__lowercase , __lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
UpperCAmelCase_ : int = {'''data''': DATA_DICT_OF_LISTS}
with open(__lowercase , '''w''' ) as f:
json.dump(__lowercase , __lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(__lowercase , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(__lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(__lowercase , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(__lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(__lowercase , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(__lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(__lowercase , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(__lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase ):
import gzip
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(__lowercase , '''rb''' ) as orig_file:
with gzip.open(__lowercase , '''wb''' ) as zipped_file:
zipped_file.writelines(__lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase ):
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(__lowercase , '''rb''' ) as orig_file:
with gzip.open(__lowercase , '''wb''' ) as zipped_file:
zipped_file.writelines(__lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(__lowercase , '''w''' ) as f:
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(__lowercase , '''w''' ) as f:
f.write(__lowercase , arcname=os.path.join('''nested''' , os.path.basename(__lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(__lowercase , '''w''' ) as f:
f.write(__lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowercase ) ) )
f.write(__lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(__lowercase , '''w''' ) as f:
f.add(__lowercase , arcname=os.path.basename(__lowercase ) )
f.add(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(__lowercase , '''w''' ) as f:
f.add(__lowercase , arcname=os.path.join('''nested''' , os.path.basename(__lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Tuple = ['''0''', '''1''', '''2''', '''3''']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(__lowercase , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Union[str, Any] = ['''0''', '''1''', '''2''', '''3''']
UpperCAmelCase_ : List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(__lowercase , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Dict = ['''0''', '''1''', '''2''', '''3''']
UpperCAmelCase_ : str = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(__lowercase , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(__lowercase , '''w''' ) as f:
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(__lowercase , '''w''' ) as f:
f.write(__lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowercase ) ) )
f.write(__lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : str = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(__lowercase , '''w''' ) as f:
f.write(__lowercase , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(__lowercase , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : str = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(__lowercase )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( ):
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def snake_case_ ( ):
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(__lowercase , '''w''' ) as f:
f.write(__lowercase , arcname=os.path.basename(__lowercase ) )
f.write(__lowercase , arcname=os.path.basename(__lowercase ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def snake_case_ ( __lowercase ):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
return data_dir
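# Hedged sketch of a consumer test (pytest injects fixtures by parameter name;
# the fixture names above were mangled, so `csv_path` below is the assumed
# original):
#
#   def test_csv_loads(csv_path):
#       ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#       assert ds.column_names == ["col_1", "col_2", "col_3"]
#       assert len(ds) == len(DATA)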
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )

    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()
    model_name_to_original = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
    lavis_name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_name, model_type=model_type, is_eval=True, device=device
    )
original_model.eval()
print('''Done!''' )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
with torch.no_grad():
if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
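    # Example invocation (illustrative script name):
    #
    #   python convert_blip_2_original_to_pytorch.py \
    #       --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b \
    #       --push_to_hub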
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    # After V - 1 relaxation rounds, any edge that can still be relaxed lies on
    # (or is reachable from) a negative-weight cycle.
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Relax every edge V - 1 times, then check once more for negative cycles."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    if check_negative_cycle(graph, distance, edge_count):
        raise Exception("Negative cycle found")

    return distance
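# Worked example (doctest style):
#
#   >>> g = [
#   ...     {"src": 0, "dst": 1, "weight": 2},
#   ...     {"src": 0, "dst": 2, "weight": 4},
#   ...     {"src": 1, "dst": 2, "weight": 1},
#   ... ]
#   >>> bellman_ford(g, 3, 3, 0)
#   [0.0, 2.0, 3.0]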
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : Optional[int] = int(input('Enter number of vertices: ').strip())
__UpperCamelCase : Optional[int] = int(input('Enter number of edges: ').strip())
__UpperCamelCase : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[int] = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
__UpperCamelCase : Optional[Any] = {'src': src, 'dst': dest, 'weight': weight}
__UpperCamelCase : Union[str, Any] = int(input('\nEnter shortest path source:').strip())
__UpperCamelCase : Union[str, Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
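# A minimal, self-contained sketch of the same edge-relaxation loop on a tiny
# hard-coded graph, so the algorithm can be exercised without interactive input.
# The edge dicts mirror the {'src', 'dst', 'weight'} layout used above; the graph
# itself is made up for illustration.
def _bellman_ford_demo():
    edges = [
        {"src": 0, "dst": 1, "weight": 4},
        {"src": 0, "dst": 2, "weight": 5},
        {"src": 1, "dst": 2, "weight": -3},
    ]
    dist = [float("inf")] * 3
    dist[0] = 0.0
    for _ in range(3 - 1):  # relax every edge |V| - 1 times
        for edge in edges:
            u, v, w = edge["src"], edge["dst"], edge["weight"]
            if dist[u] != float("inf") and dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
    return dist


assert _bellman_ford_demo() == [0.0, 4.0, 1.0]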
| 641
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
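# A minimal, self-contained sketch of the SIGALRM-based timeout pattern used by the
# context manager above (signal and contextlib are already imported at the top of
# this module; repeated here so the sketch stands alone). Unix-only and
# main-thread-only, like the original:
import contextlib
import signal


@contextlib.contextmanager
def _timeout_demo(seconds):
    def _handler(signum, frame):
        raise TimeoutError("timed out")

    old_handler = signal.signal(signal.SIGALRM, _handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        yield
    finally:
        # Disarm the timer and restore the previous handler.
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)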
@contextlib.contextmanager
def snake_case_ ( ):
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
pass
class lowerCAmelCase__( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class lowerCAmelCase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
| 641
| 1
|
import string
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Optional[int] = ''''''
for i in sequence:
UpperCAmelCase_ : Optional[Any] = ord(__lowercase )
if 6_5 <= extract <= 9_0:
output += chr(1_5_5 - extract )
elif 9_7 <= extract <= 1_2_2:
output += chr(2_1_9 - extract )
else:
output += i
return output
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Union[str, Any] = string.ascii_letters
UpperCAmelCase_ : int = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(__lowercase )] if c in letters else c for c in sequence )
def snake_case_ ( ):
from timeit import timeit
print('''Running performance benchmarks...''' )
UpperCAmelCase_ : int = '''from string import printable ; from __main__ import atbash, atbash_slow'''
print(F'''> atbash_slow(): {timeit('atbash_slow(printable)' , setup=__lowercase )} seconds''' )
print(F'''> atbash(): {timeit('atbash(printable)' , setup=__lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'{example} encrypted in atbash: {atbash(example)}')
benchmark()
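# A self-contained sanity check for the cipher above. Atbash maps 'a' <-> 'z',
# 'b' <-> 'y', and so on, so applying it twice is the identity. The function names
# follow the call sites used elsewhere in this file.
def _atbash_roundtrip_demo():
    sample = "Hello, World!"
    assert atbash(atbash(sample)) == sample
    assert atbash("abc") == "zyx"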
| 641
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : int = 'falcon'
A_ : int = ['past_key_values']
def __init__( self : Optional[Any] , __snake_case : Tuple=65_024 , __snake_case : List[str]=4_544 , __snake_case : Optional[Any]=32 , __snake_case : Any=71 , __snake_case : str=1E-5 , __snake_case : List[str]=0.02 , __snake_case : List[Any]=True , __snake_case : Dict=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=None , __snake_case : List[Any]=False , __snake_case : Dict=False , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=False , __snake_case : Dict=11 , __snake_case : List[str]=11 , **__snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : int = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''n_embed''' , __snake_case )
UpperCAmelCase_ : str = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Tuple = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Optional[int] = alibi
UpperCAmelCase_ : Dict = new_decoder_architecture
UpperCAmelCase_ : List[Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : Tuple = parallel_attn
UpperCAmelCase_ : List[Any] = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return not self.alibi
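# A minimal arithmetic sketch of the head-dimension property above, using the
# Falcon-7B-style defaults from __init__ (hidden size 4544, 71 attention heads).
# Plain arithmetic only, so it runs without transformers installed:
_hidden_size_demo, _num_heads_demo = 4_544, 71
assert _hidden_size_demo // _num_heads_demo == 64  # each head sees a 64-dim slice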
| 641
| 1
|
import os
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__UpperCamelCase : Tuple = '.'
if __name__ == "__main__":
__UpperCamelCase : List[Any] = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
__UpperCamelCase : List[str] = []
__UpperCamelCase : str = []
with open(doctest_file_path) as fp:
for line in fp:
__UpperCamelCase : Union[str, Any] = line.strip()
__UpperCamelCase : Optional[int] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__UpperCamelCase : List[Any] = '\n'.join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 641
|
def snake_case_ ( __lowercase ):
return " ".join(
''''''.join(word[::-1] ) if len(__lowercase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
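# Expected behavior of the function above: words longer than four characters are
# reversed in place, shorter ones are left untouched, e.g.
#
#     reverse_long_words("Hey wollef sroirraw")  ->  "Hey fellow warriors"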
| 641
| 1
|
from __future__ import annotations
def snake_case_ ( __lowercase ):
if len(__lowercase ) == 0:
return array
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = min(__lowercase ), max(__lowercase )
# Compute the variables
UpperCAmelCase_ : Optional[Any] = _max - _min + 1
UpperCAmelCase_ , UpperCAmelCase_ : str = [0] * holes_range, [0] * holes_range
# Perform the sort: drop each value into its hole and count repeats.
for i in array:
UpperCAmelCase_ : Optional[int] = i - _min
UpperCAmelCase_ : Optional[int] = i
holes_repeat[index] += 1
# Rebuild the array by writing the values back in sorted order.
UpperCAmelCase_ : int = 0
for i in range(__lowercase ):
while holes_repeat[i] > 0:
UpperCAmelCase_ : List[Any] = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : Any = input('Enter numbers separated by comma:\n')
__UpperCamelCase : Tuple = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
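# A compact, self-contained restatement of pigeonhole sort for a deterministic
# check, independent of the interactive demo above:
def _pigeonhole_sort_demo(values):
    lo, hi = min(values), max(values)
    counts = [0] * (hi - lo + 1)  # one "hole" per possible value
    for v in values:
        counts[v - lo] += 1
    out = []
    for offset, count in enumerate(counts):
        out.extend([lo + offset] * count)
    return out


assert _pigeonhole_sort_demo([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]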
| 641
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def snake_case_ ( __lowercase , __lowercase=False ):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
UpperCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowercase ):
UpperCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
__lowercase , batched=__lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
UpperCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowercase ):
if use_longest:
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__lowercase , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1_6 )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = Accelerator(dispatch_batches=__lowercase , split_batches=__lowercase )
UpperCAmelCase_ : int = get_dataloader(__lowercase , not dispatch_batches )
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowercase , __lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = batch.values()
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : Any = [], []
for logit, targ in logits_and_targets:
logits.append(__lowercase )
targs.append(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.cat(__lowercase ), torch.cat(__lowercase )
return logits, targs
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=False , __lowercase=False , __lowercase=1_6 ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = get_basic_setup(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = generate_predictions(__lowercase , __lowercase , __lowercase )
assert (
len(__lowercase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowercase )}'''
def snake_case_ ( __lowercase = False , __lowercase = False ):
UpperCAmelCase_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_mrpc_setup(__lowercase , __lowercase )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup['''no''']
model.to(__lowercase )
model.eval()
for batch in dataloader:
batch.to(__lowercase )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**__lowercase )
UpperCAmelCase_ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__lowercase , references=batch['''labels'''] )
UpperCAmelCase_ : Optional[int] = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Optional[int] = batch['''labels''']
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__lowercase , references=__lowercase )
UpperCAmelCase_ : Dict = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def snake_case_ ( ):
UpperCAmelCase_ : str = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower, so they should only be run on GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__lowercase , __lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Optional[Any] = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__lowercase , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
UpperCAmelCase_ : List[Any] = Accelerator()
test_torch_metrics(__lowercase , 5_1_2 )
accelerator.state._reset_state()
def snake_case_ ( __lowercase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 641
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Dict = 13
UpperCAmelCase_ : Any = 7
UpperCAmelCase_ : str = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : List[Any] = 99
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Optional[int] = 32
UpperCAmelCase_ : Tuple = 2
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : List[Any] = 0.1
UpperCAmelCase_ : Union[str, Any] = 0.1
UpperCAmelCase_ : Union[str, Any] = 512
UpperCAmelCase_ : Any = 16
UpperCAmelCase_ : Dict = 2
UpperCAmelCase_ : Union[str, Any] = 0.02
UpperCAmelCase_ : Any = 3
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : Any = '''last'''
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Optional[Any] = 0
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCAmelCase_ : int = None
if self.use_input_lengths:
UpperCAmelCase_ : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : str = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
if self.use_labels:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Union[str, Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : List[Any] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : str , __snake_case : List[Any] , __snake_case : Tuple , ):
'''simple docstring'''
UpperCAmelCase_ : Any = TFFlaubertModel(config=__snake_case )
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : str = [input_ids, input_mask]
UpperCAmelCase_ : List[str] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , __snake_case : Optional[int] , __snake_case : int , __snake_case : Any , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = TFFlaubertWithLMHeadModel(__snake_case )
UpperCAmelCase_ : Optional[int] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : int = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : str = TFFlaubertForQuestionAnsweringSimple(__snake_case )
UpperCAmelCase_ : List[str] = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : int = model(__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = TFFlaubertForSequenceClassification(__snake_case )
UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : Tuple = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Tuple , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Dict , __snake_case : List[Any] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_labels
UpperCAmelCase_ : Tuple = TFFlaubertForTokenClassification(config=__snake_case )
UpperCAmelCase_ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase_ : List[str] = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.num_choices
UpperCAmelCase_ : List[str] = TFFlaubertForMultipleChoice(config=__snake_case )
UpperCAmelCase_ : List[str] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : Optional[int] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : List[Any] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase_ : Dict = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Optional[int] = config_and_inputs
UpperCAmelCase_ : Optional[int] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): check whether language generation is also applicable to other models
A_ : str = (
{
'feature-extraction': TFFlaubertModel,
'fill-mask': TFFlaubertWithLMHeadModel,
'question-answering': TFFlaubertForQuestionAnsweringSimple,
'text-classification': TFFlaubertForSequenceClassification,
'token-classification': TFFlaubertForTokenClassification,
'zero-shot': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Any = False
A_ : Any = False
def _lowerCamelCase ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_case_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = TFFlaubertModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__snake_case )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__snake_case )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__snake_case )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__snake_case )
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = TFFlaubertModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
UpperCAmelCase_ : int = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCAmelCase_ : Any = model(__snake_case )[0]
UpperCAmelCase_ : Dict = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __snake_case )
# compare the actual values for a slice.
UpperCAmelCase_ : List[str] = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 641
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 1
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : int = ProphetNetTokenizer
A_ : Union[str, Any] = False
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
super().setUp()
UpperCAmelCase_ : Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowerCamelCase ( self : List[str] , __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = '''UNwant\u00E9d,running'''
UpperCAmelCase_ : List[str] = '''unwanted, running'''
return input_text, output_text
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__snake_case , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [9, 6, 7, 12, 10, 11] )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=__snake_case , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
UpperCAmelCase_ : Any = {}
for i, token in enumerate(__snake_case ):
UpperCAmelCase_ : List[str] = i
UpperCAmelCase_ : List[Any] = WordpieceTokenizer(vocab=__snake_case , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
UpperCAmelCase_ : int = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase_ : Optional[int] = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
UpperCAmelCase_ : str = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
UpperCAmelCase_ : int = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
UpperCAmelCase_ : int = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
UpperCAmelCase_ : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
UpperCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(__snake_case )
UpperCAmelCase_ : List[Any] = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 641
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : List[str] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = data
UpperCAmelCase_ : List[Any] = [0X67_45_23_01, 0Xef_cd_ab_89, 0X98_ba_dc_fe, 0X10_32_54_76, 0Xc3_d2_e1_f0]
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0Xff_ff_ff_ff
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
UpperCAmelCase_ : Union[str, Any] = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def _lowerCamelCase ( self : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Any = list(struct.unpack('''>16L''' , __snake_case ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCAmelCase_ : str = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.padding()
UpperCAmelCase_ : str = self.split_blocks()
for block in self.blocks:
UpperCAmelCase_ : Any = self.expand_block(__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
UpperCAmelCase_ : Optional[Any] = (b & c) | ((~b) & d)
UpperCAmelCase_ : Optional[Any] = 0X5a_82_79_99
elif 20 <= i < 40:
UpperCAmelCase_ : List[Any] = b ^ c ^ d
UpperCAmelCase_ : str = 0X6e_d9_eb_a1
elif 40 <= i < 60:
UpperCAmelCase_ : str = (b & c) | (b & d) | (c & d)
UpperCAmelCase_ : Optional[int] = 0X8f_1b_bc_dc
elif 60 <= i < 80:
UpperCAmelCase_ : Union[str, Any] = b ^ c ^ d
UpperCAmelCase_ : Dict = 0Xca_62_c1_d6
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = (
self.rotate(__snake_case , 5 ) + f + e + k + expanded_block[i] & 0Xff_ff_ff_ff,
a,
self.rotate(__snake_case , 30 ),
c,
d,
)
UpperCAmelCase_ : Optional[Any] = (
self.h[0] + a & 0Xff_ff_ff_ff,
self.h[1] + b & 0Xff_ff_ff_ff,
self.h[2] + c & 0Xff_ff_ff_ff,
self.h[3] + d & 0Xff_ff_ff_ff,
self.h[4] + e & 0Xff_ff_ff_ff,
)
return ("{:08x}" * 5).format(*self.h )
def snake_case_ ( ):
UpperCAmelCase_ : Tuple = B'''Test String'''
assert SHAaHash(__lowercase ).final_hash() == hashlib.shaa(__lowercase ).hexdigest() # noqa: S324
def snake_case_ ( ):
UpperCAmelCase_ : int = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
UpperCAmelCase_ : List[Any] = parser.parse_args()
UpperCAmelCase_ : Optional[Any] = args.input_string
# In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
UpperCAmelCase_ : List[str] = f.read()
else:
UpperCAmelCase_ : Tuple = bytes(__lowercase , '''utf-8''' )
print(SHAaHash(__lowercase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
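# A self-contained cross-check using only hashlib, mirroring the comparison made in
# the test function above. The message is the classic SHA-1 test vector:
import hashlib

_demo_msg = b"The quick brown fox jumps over the lazy dog"
assert hashlib.sha1(_demo_msg).hexdigest() == "2fd4e1c67a2d28fced849ee1bb76e7391b93eb12"  # noqa: S324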
| 641
| 1
|
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
__UpperCamelCase : Tuple = get_logger(__name__)
class lowerCAmelCase__( enum.Enum ):
'''simple docstring'''
A_ : str = 'all_checks'
A_ : List[str] = 'basic_checks'
A_ : Optional[int] = 'no_checks'
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def snake_case_ ( __lowercase , __lowercase , __lowercase=None ):
if expected_checksums is None:
logger.info('''Unable to verify checksums.''' )
return
if len(set(__lowercase ) - set(__lowercase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(__lowercase ) - set(__lowercase ) ) )
if len(set(__lowercase ) - set(__lowercase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(__lowercase ) - set(__lowercase ) ) )
UpperCAmelCase_ : int = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
UpperCAmelCase_ : List[str] = ''' for ''' + verification_name if verification_name is not None else ''''''
if len(__lowercase ) > 0:
raise NonMatchingChecksumError(
F'''Checksums didn\'t match{for_verification_name}:\n'''
F'''{bad_urls}\n'''
'''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' )
logger.info('''All the checksums matched successfully''' + for_verification_name )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def snake_case_ ( __lowercase , __lowercase ):
if expected_splits is None:
logger.info('''Unable to verify splits sizes.''' )
return
if len(set(__lowercase ) - set(__lowercase ) ) > 0:
raise ExpectedMoreSplits(str(set(__lowercase ) - set(__lowercase ) ) )
if len(set(__lowercase ) - set(__lowercase ) ) > 0:
raise UnexpectedSplits(str(set(__lowercase ) - set(__lowercase ) ) )
UpperCAmelCase_ : Optional[int] = [
{'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(__lowercase ) > 0:
raise NonMatchingSplitsSizesError(str(__lowercase ) )
logger.info('''All the splits matched successfully.''' )
def snake_case_ ( __lowercase , __lowercase = True ):
if record_checksum:
UpperCAmelCase_ : Dict = shaaaa()
with open(__lowercase , '''rb''' ) as f:
for chunk in iter(lambda: f.read(1 << 2_0 ) , B'''''' ):
m.update(__lowercase )
UpperCAmelCase_ : Any = m.hexdigest()
else:
UpperCAmelCase_ : Optional[int] = None
return {"num_bytes": os.path.getsize(__lowercase ), "checksum": checksum}
def snake_case_ ( __lowercase ):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
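# A minimal, self-contained sketch of the chunked-read hashing pattern used by the
# size/checksum helper above (the temp file and its contents are made up for
# illustration):
import os
import tempfile
from hashlib import sha256

with tempfile.NamedTemporaryFile(delete=False) as _tmp:
    _tmp.write(b"hello world")
    _tmp_path = _tmp.name
_hasher = sha256()
with open(_tmp_path, "rb") as _f:
    # Read in 1 MiB chunks, exactly like the helper above.
    for _chunk in iter(lambda: _f.read(1 << 20), b""):
        _hasher.update(_chunk)
assert _hasher.hexdigest() == sha256(b"hello world").hexdigest()
os.remove(_tmp_path)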
| 641
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = 'timesformer'
def __init__( self : int , __snake_case : Any=224 , __snake_case : str=16 , __snake_case : Any=3 , __snake_case : List[Any]=8 , __snake_case : Dict=768 , __snake_case : Dict=12 , __snake_case : Tuple=12 , __snake_case : Dict=3_072 , __snake_case : str="gelu" , __snake_case : Union[str, Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.02 , __snake_case : Optional[Any]=1E-6 , __snake_case : List[Any]=True , __snake_case : List[str]="divided_space_time" , __snake_case : Optional[int]=0 , **__snake_case : Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Optional[Any] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : int = num_frames
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : str = qkv_bias
UpperCAmelCase_ : Dict = attention_type
UpperCAmelCase_ : str = drop_path_rate
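# A small arithmetic sketch of what the defaults above imply for the token count:
# 224x224 frames cut into 16x16 patches give 14 * 14 = 196 patches per frame, and
# with 8 frames that is 1568 patch tokens (plus any special tokens the model adds).
assert (224 // 16) ** 2 * 8 == 1_568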
| 641
| 1
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = MODEL_FOR_MASKED_LM_MAPPING
A_ : Optional[int] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
UpperCAmelCase_ : str = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 38_015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 25_506, '''token_str''': ''' accuser'''},
] , )
UpperCAmelCase_ : Optional[int] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 38_015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 25_506,
'''token_str''': ''' accuser''',
},
] , )
UpperCAmelCase_ : str = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13_606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 2_941, '''token_str''': ''' Te'''},
] , )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Dict = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
UpperCAmelCase_ : Dict = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 35_676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 16_416, '''token_str''': '''ELS'''},
] , )
UpperCAmelCase_ : str = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 35_676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 16_416, '''token_str''': '''ELS'''},
] , )
UpperCAmelCase_ : Union[str, Any] = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2_941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13_606, '''token_str''': ''' Clara'''},
] , )
UpperCAmelCase_ : Any = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 35_676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 35_676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
UpperCAmelCase_ : Optional[int] = pipe('''Paris is the [MASK] of France.''' )
# We don't actually care about the result; we just want to make sure
# it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(__snake_case , __snake_case )
@slow
@require_torch
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(__snake_case )
@slow
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(__snake_case )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 610, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 1_573, '''token_str''': ''' Chris'''},
] , )
UpperCAmelCase_ : Optional[Any] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.251,
'''token''': 2_201,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.214,
'''token''': 12_790,
'''token_str''': ''' Lyon''',
},
] , )
UpperCAmelCase_ : List[str] = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 13_606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 2_941, '''token_str''': ''' Te'''},
] , )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = None
self.run_pipeline_test(__snake_case , [] )
@require_tf
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : int = None
self.run_pipeline_test(__snake_case , [] )
def _lowerCamelCase ( self : str , __snake_case : List[Any] , __snake_case : int , __snake_case : List[str] ):
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
UpperCAmelCase_ : Optional[Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
UpperCAmelCase_ : Any = [
f'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def _lowerCamelCase ( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = fill_masker.tokenizer
UpperCAmelCase_ : Union[str, Any] = fill_masker.model
UpperCAmelCase_ : Any = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase_ : Dict = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase_ : Any = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
__snake_case , [
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
] , )
with self.assertRaises(__snake_case ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__snake_case ):
fill_masker('''This is''' )
self.run_test_top_k(__snake_case , __snake_case )
self.run_test_targets(__snake_case , __snake_case )
self.run_test_top_k_targets(__snake_case , __snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(__snake_case , __snake_case )
self.fill_mask_with_multiple_masks(__snake_case , __snake_case )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Dict = tokenizer.get_vocab()
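# the first two entries of the sorted vocabulary serve as deterministic targets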
UpperCAmelCase_ : Dict = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCAmelCase_ : Optional[int] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case , targets=__snake_case )
UpperCAmelCase_ : str = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase_ : List[Any] = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , __snake_case )
UpperCAmelCase_ : Tuple = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(__snake_case ) )
# Call argument
UpperCAmelCase_ : str = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
UpperCAmelCase_ : str = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=__snake_case )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase_ : List[str] = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , __snake_case )
UpperCAmelCase_ : int = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(__snake_case ) )
# Score equivalence
UpperCAmelCase_ : Union[str, Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=__snake_case )
UpperCAmelCase_ : str = [top_mask['''token_str'''] for top_mask in outputs]
UpperCAmelCase_ : Tuple = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__snake_case ) == set(__snake_case ):
UpperCAmelCase_ : List[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=__snake_case )
UpperCAmelCase_ : Optional[Any] = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
# Raises with invalid
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : Dict = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[''''''] )
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : List[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets='''''' )
def _lowerCamelCase ( self : int , __snake_case : List[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case , top_k=2 )
UpperCAmelCase_ : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase_ : Union[str, Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
UpperCAmelCase_ : Union[str, Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = tokenizer.get_vocab()
UpperCAmelCase_ : Tuple = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
# top_k=2, ntargets=3
UpperCAmelCase_ : int = sorted(vocab.keys() )[:3]
UpperCAmelCase_ : int = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=__snake_case )
# If we use the most probable targets, and filter differently, we should still
# have the same results
UpperCAmelCase_ : Optional[int] = [el['''token_str'''] for el in sorted(__snake_case , key=lambda x : x["score"] , reverse=__snake_case )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__snake_case ).issubset(__snake_case ):
UpperCAmelCase_ : Dict = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=__snake_case )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
def _lowerCamelCase ( self : Any , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
UpperCAmelCase_ : Dict = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCAmelCase_ : Optional[int] = sorted(vocab.keys() )[:3]
UpperCAmelCase_ : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCAmelCase_ : str = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=__snake_case , top_k=10 )
# The target list contains duplicates, so we cannot get more unique results
# than the number of distinct targets
self.assertEqual(len(__snake_case ) , 3 )
def _lowerCamelCase ( self : Any , __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : str = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
UpperCAmelCase_ : str = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
__snake_case , [
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
] , )
| 641
|
import math
import qiskit
def snake_case_ ( __lowercase = 1 , __lowercase = 1 , __lowercase = 1 ):
if (
isinstance(__lowercase , __lowercase )
or isinstance(__lowercase , __lowercase )
or isinstance(__lowercase , __lowercase )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != input_a)
or (math.floor(__lowercase ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
UpperCAmelCase_ : Any = qiskit.QuantumRegister(4 , '''qr''' )
UpperCAmelCase_ : List[str] = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
UpperCAmelCase_ : Any = [input_a, input_a, carry_in]
UpperCAmelCase_ : Dict = qiskit.QuantumCircuit(__lowercase , __lowercase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__lowercase ) # for Hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__lowercase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__lowercase ) # for 0 entries
# build the circuit
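# the two Toffoli gates accumulate the carry-out on qubit 3, the CNOT cascade
# leaves the sum (a XOR b XOR carry_in) on qubit 2, and the final CNOT restores qubit 1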
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __lowercase ) # measure the last two qubits
UpperCAmelCase_ : Optional[int] = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCAmelCase_ : List[str] = qiskit.execute(__lowercase , __lowercase , shots=1_0_0_0 )
return job.result().get_counts(__lowercase )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 641
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
UpperCAmelCase_ : int = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase_ : Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
return image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = dct.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
def snake_case_ ( __lowercase , __lowercase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
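# the original attention has no key bias, so a zero tensor of the same shape is
# placed between the query and value biases to form the fused qkv bias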
UpperCAmelCase_ : int = torch.cat((q_bias, torch.zeros_like(__lowercase , requires_grad=__lowercase ), v_bias) )
UpperCAmelCase_ : List[str] = qkv_bias
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4
UpperCAmelCase_ : Any = BlipaVisionConfig(image_size=__lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# the flan-T5 checkpoints don't seem to have bos_token_id set properly
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : List[Any] = BlipaConfig(vision_config=__lowercase , text_config=__lowercase )
return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
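# the id of the newline token is reused as the eos token id so generated captions
# stop at a newline (presumably to match the original LAVIS behaviour)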
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# the remaining keys can be renamed via simple substring replacements
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
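# positions holding the pad token are replaced by -100 so the loss ignores them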
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
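# drop the prompt tokens so only the newly generated caption is decoded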
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[str] = 'gpt_bigcode'
A_ : Optional[Any] = ['past_key_values']
A_ : Optional[int] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
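# attribute_map resolves the generic attribute names used across the library
# (e.g. hidden_size) to the GPT-2 style names (n_embd, ...) stored in this config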
def __init__( self : int , __snake_case : Dict=50_257 , __snake_case : List[str]=1_024 , __snake_case : Dict=768 , __snake_case : Optional[int]=12 , __snake_case : str=12 , __snake_case : List[str]=None , __snake_case : List[str]="gelu_pytorch_tanh" , __snake_case : Optional[Any]=0.1 , __snake_case : str=0.1 , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=1E-5 , __snake_case : Dict=0.02 , __snake_case : Optional[int]=True , __snake_case : Tuple=True , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=50_256 , __snake_case : Optional[Any]=True , __snake_case : Optional[Any]=True , __snake_case : List[Any]=True , **__snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : int = n_positions
UpperCAmelCase_ : Any = n_embd
UpperCAmelCase_ : Union[str, Any] = n_layer
UpperCAmelCase_ : List[str] = n_head
UpperCAmelCase_ : List[Any] = n_inner
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : str = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = attn_pdrop
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = scale_attn_weights
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : Dict = attention_softmax_in_fpaa
UpperCAmelCase_ : Union[str, Any] = scale_attention_softmax_in_fpaa
UpperCAmelCase_ : Optional[int] = multi_query
UpperCAmelCase_ : Optional[Any] = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
| 641
| 1
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCamelCase : Dict = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'token-classification'
def __init__( self : Any , __snake_case : Optional[Any] ):
'''simple docstring'''
if type(__snake_case ) == dict:
UpperCAmelCase_ : Tuple = Namespace(**__snake_case )
UpperCAmelCase_ : Dict = import_module('''tasks''' )
try:
UpperCAmelCase_ : int = getattr(__snake_case , hparams.task_type )
UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCAmelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase_ : Dict = CrossEntropyLoss().ignore_index
super().__init__(__snake_case , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
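# XLNet-style models put the CLS token at the end and pad on the left, hence
# the branching on config.model_type in the call above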
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK (this will not be needed for much longer)
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
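# map ids back to label strings, skipping positions that carry the padding label id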
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
'''f1''': fa_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
# when stable
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
# Add NER specific options
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__snake_case , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__snake_case , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this; by default 0, meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Optional[Any] = NERTransformer(args)
__UpperCamelCase : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
__UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 641
|
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( __lowercase , __lowercase , __lowercase=None , **__lowercase ):
UpperCAmelCase_ : Tuple = [x.strip() for x in open(__lowercase ).readlines()]
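# truncate the second file to the same number of lines so the pairs stay aligned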
UpperCAmelCase_ : Dict = [x.strip() for x in open(__lowercase ).readlines()][: len(__lowercase )]
UpperCAmelCase_ : int = calculate_rouge(__lowercase , __lowercase , **__lowercase )
if save_path is not None:
save_json(__lowercase , __lowercase , indent=__lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 641
| 1
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , __snake_case : List[str] , __snake_case : Union[str, Any]=13 , __snake_case : Any=7 , __snake_case : Optional[Any]=True , __snake_case : Optional[Any]=True , __snake_case : Any=True , __snake_case : Optional[Any]=True , __snake_case : List[str]=99 , __snake_case : Optional[int]=32 , __snake_case : List[Any]=5 , __snake_case : List[str]=4 , __snake_case : Any=37 , __snake_case : str="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : Any=512 , __snake_case : List[Any]=16 , __snake_case : Any=2 , __snake_case : Union[str, Any]=0.02 , __snake_case : str=4 , ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Any = batch_size
UpperCAmelCase_ : Optional[int] = seq_length
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : List[Any] = use_attention_mask
UpperCAmelCase_ : int = use_token_type_ids
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : Tuple = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = num_choices
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Any = None
if self.use_attention_mask:
UpperCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Any = None
if self.use_token_type_ids:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : str = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = config_and_inputs
UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
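# enable decoder mode and build encoder hidden states plus a cross-attention mask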
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Any = True
A_ : Dict = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = FlaxBertModelTester(self )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
# Only check this for base model, not necessary for all model classes.
# This will also help speed up tests.
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained('''bert-base-cased''' )
UpperCAmelCase_ : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__snake_case )
| 641
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : int , __snake_case : List[Any] , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[str]=False , __snake_case : List[str]=False , __snake_case : Tuple=False , __snake_case : List[str]=2 , __snake_case : Optional[int]=99 , __snake_case : Tuple=0 , __snake_case : int=32 , __snake_case : Optional[int]=5 , __snake_case : str=4 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=512 , __snake_case : Tuple=2 , __snake_case : List[Any]=0.02 , __snake_case : Any=2 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]="last" , __snake_case : Dict=True , __snake_case : Any=None , __snake_case : str=0 , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = gelu_activation
UpperCAmelCase_ : str = sinusoidal_embeddings
UpperCAmelCase_ : List[str] = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[Any] = n_langs
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = n_special
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Any = summary_type
UpperCAmelCase_ : Optional[int] = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_input_lengths:
UpperCAmelCase_ : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((UpperCAmelCase_) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
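# multiple-choice inputs are expanded so every choice shares the same context:
# (batch, seq_len) -> (batch, num_choices, seq_len)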
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): check whether language generation is also applicable to other models
A_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = XLMModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
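# For quick reference, a minimal standalone sketch of the greedy-decoding check the
# integration test above performs. The model id and the input ids come from the test
# itself; running this requires `torch` and `transformers` to be installed.
import torch
from transformers import XLMWithLMHeadModel

xlm = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
prompt = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
generated = xlm.generate(prompt, do_sample=False)  # greedy decoding
# Greedy decoding degenerates into "the president" repeated ten times, which is
# exactly the sequence the assertion above expects.
print(generated[0].tolist())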
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , __snake_case : Union[str, Any] , __snake_case : Optional[int]=3 , __snake_case : Tuple=32 , __snake_case : Optional[Any]=3 , __snake_case : List[Any]=10 , __snake_case : List[str]=[10, 20, 30, 40] , __snake_case : Tuple=[1, 1, 2, 1] , __snake_case : List[Any]=True , __snake_case : List[str]=True , __snake_case : Tuple="relu" , __snake_case : Optional[Any]=3 , __snake_case : Optional[Any]=None , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : List[Any] = image_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : int = embeddings_size
UpperCAmelCase_ : Optional[Any] = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Optional[Any] = num_labels
UpperCAmelCase_ : List[Any] = scope
UpperCAmelCase_ : Dict = len(__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : int = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Dict = RegNetModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = model(__snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self : Dict , __snake_case : Dict , __snake_case : List[str] , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.num_labels
UpperCAmelCase_ : Optional[int] = RegNetForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = config_and_inputs
UpperCAmelCase_ : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[str] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
A_ : str = (
{'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
if is_torch_available()
else {}
)
A_ : List[str] = False
A_ : Dict = False
A_ : Tuple = False
A_ : Union[str, Any] = False
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = RegNetModelTester(self )
UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[str] = model_class(__snake_case )
UpperCAmelCase_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(config=__snake_case )
for name, module in model.named_modules():
if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
def check_hidden_states_output(__snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict ):
UpperCAmelCase_ : str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
UpperCAmelCase_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : str = self.model_tester.num_stages
self.assertEqual(len(__snake_case ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase_ : Optional[Any] = layer_type
UpperCAmelCase_ : Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = RegNetModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def snake_case_ ( ):
UpperCAmelCase_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__snake_case )
UpperCAmelCase_ : Optional[Any] = self.default_image_processor
UpperCAmelCase_ : Optional[int] = prepare_img()
UpperCAmelCase_ : List[str] = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(**__snake_case )
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __snake_case )
UpperCAmelCase_ : int = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1E-4 ) )
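# A sketch of the inference path the integration test above exercises. The checkpoint
# name is an assumption standing in for REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0].
import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

checkpoint = "facebook/regnet-y-040"  # assumed archive entry
processor = AutoImageProcessor.from_pretrained(checkpoint)
regnet = RegNetForImageClassification.from_pretrained(checkpoint)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = regnet(**inputs).logits  # shape (1, 1000), as asserted above
print(logits.argmax(-1).item())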
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(snake_case__ )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'rag'
A_ : Tuple = True
def __init__( self : int , __snake_case : List[str]=None , __snake_case : List[Any]=True , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Any=None , __snake_case : Optional[int]=None , __snake_case : Optional[int]=" / " , __snake_case : Any=" // " , __snake_case : Tuple=5 , __snake_case : Union[str, Any]=300 , __snake_case : Any=768 , __snake_case : Tuple=8 , __snake_case : int="wiki_dpr" , __snake_case : Optional[int]="train" , __snake_case : Tuple="compressed" , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=False , __snake_case : str=False , __snake_case : Dict=0.0 , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : str=False , __snake_case : Optional[Any]=True , __snake_case : int=None , **__snake_case : str , ):
'''simple docstring'''
super().__init__(
bos_token_id=__snake_case , pad_token_id=__snake_case , eos_token_id=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , is_encoder_decoder=__snake_case , prefix=__snake_case , vocab_size=__snake_case , **__snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase_ : Dict = kwargs.pop('''question_encoder''' )
UpperCAmelCase_ : List[Any] = question_encoder_config.pop('''model_type''' )
UpperCAmelCase_ : Any = kwargs.pop('''generator''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : int = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = reduce_loss
UpperCAmelCase_ : List[Any] = label_smoothing
UpperCAmelCase_ : Tuple = exclude_bos_score
UpperCAmelCase_ : int = do_marginalize
UpperCAmelCase_ : Tuple = title_sep
UpperCAmelCase_ : Union[str, Any] = doc_sep
UpperCAmelCase_ : Any = n_docs
UpperCAmelCase_ : Optional[int] = max_combined_length
UpperCAmelCase_ : Any = dataset
UpperCAmelCase_ : List[Any] = dataset_split
UpperCAmelCase_ : Union[str, Any] = index_name
UpperCAmelCase_ : List[str] = retrieval_vector_size
UpperCAmelCase_ : Optional[Any] = retrieval_batch_size
UpperCAmelCase_ : Optional[int] = passages_path
UpperCAmelCase_ : Optional[Any] = index_path
UpperCAmelCase_ : List[Any] = use_dummy_dataset
UpperCAmelCase_ : int = output_retrieved
UpperCAmelCase_ : int = do_deduplication
UpperCAmelCase_ : Optional[int] = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase_ : int = getattr(self.generator , '''forced_eos_token_id''' , __snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : str ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Optional[int] = self.question_encoder.to_dict()
UpperCAmelCase_ : Dict = self.generator.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
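# A sketch of composing a RagConfig from its two sub-model configs: the constructor
# above requires `question_encoder` and `generator` to be passed as config dicts.
# The concrete sub-config classes chosen here are illustrative.
from transformers import BartConfig, DPRConfig, RagConfig

rag_config = RagConfig(
    question_encoder=DPRConfig().to_dict(),
    generator=BartConfig().to_dict(),
    n_docs=5,
    index_name="compressed",
)
print(rag_config.generator.model_type)  # "bart"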
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = SMALL_MODEL_IDENTIFIER
UpperCAmelCase_ : Union[str, Any] = '''pt'''
UpperCAmelCase_ : List[Any] = '''tf'''
def _lowerCamelCase ( self : Optional[int] , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__snake_case )
def _lowerCamelCase ( self : List[str] , __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = TFAutoModel.from_pretrained(self.test_model , from_pt=__snake_case )
model_tf.save_pretrained(__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = '''mock_framework'''
# Framework provided - return whatever the user provides
UpperCAmelCase_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model , __snake_case )
self.assertEqual(__snake_case , __snake_case )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__snake_case )
UpperCAmelCase_ : Optional[Any] = FeaturesManager.determine_framework(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__snake_case )
UpperCAmelCase_ : Union[str, Any] = FeaturesManager.determine_framework(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__snake_case )
UpperCAmelCase_ : Tuple = FeaturesManager.determine_framework(__snake_case )
self.assertEqual(__snake_case , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__snake_case )
UpperCAmelCase_ : int = FeaturesManager.determine_framework(__snake_case )
self.assertEqual(__snake_case , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : Optional[Any] = FeaturesManager.determine_framework(__snake_case )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : str = MagicMock(return_value=__snake_case )
with patch('''transformers.onnx.features.is_tf_available''' , __snake_case ):
UpperCAmelCase_ : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__snake_case , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
UpperCAmelCase_ : Dict = MagicMock(return_value=__snake_case )
with patch('''transformers.onnx.features.is_torch_available''' , __snake_case ):
UpperCAmelCase_ : Tuple = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__snake_case , self.framework_tf )
# Both in environment -> use PyTorch
UpperCAmelCase_ : int = MagicMock(return_value=__snake_case )
UpperCAmelCase_ : Tuple = MagicMock(return_value=__snake_case )
with patch('''transformers.onnx.features.is_tf_available''' , __snake_case ), patch(
'''transformers.onnx.features.is_torch_available''' , __snake_case ):
UpperCAmelCase_ : List[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__snake_case , self.framework_pt )
# Both not in environment -> raise error
UpperCAmelCase_ : Dict = MagicMock(return_value=__snake_case )
UpperCAmelCase_ : int = MagicMock(return_value=__snake_case )
with patch('''transformers.onnx.features.is_tf_available''' , __snake_case ), patch(
'''transformers.onnx.features.is_torch_available''' , __snake_case ):
with self.assertRaises(__snake_case ):
UpperCAmelCase_ : str = FeaturesManager.determine_framework(self.test_model )
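# A sketch of the resolution order the tests above pin down: an explicitly requested
# framework wins, the saved files of a local checkpoint decide next, and for hub model
# ids the installed backend decides, with PyTorch preferred when both are available.
# The model id below is illustrative.
from transformers.onnx import FeaturesManager

framework = FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")
print(framework)  # "pt" in an environment where torch is installed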
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def snake_case_ ( ):
    UpperCAmelCase_ : str = HfArgumentParser(TensorFlowBenchmarkArguments )
    UpperCAmelCase_ : Optional[Any] = parser.parse_args_into_dataclasses()[0]
    UpperCAmelCase_ : Optional[int] = TensorFlowBenchmark(args=benchmark_args )
    try:
        UpperCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        UpperCAmelCase_ : List[Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        UpperCAmelCase_ : List[str] = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        UpperCAmelCase_ : Optional[int] = ''''''
        UpperCAmelCase_ : Dict = eval(str(e ).split(''' ''' )[-1] )
        UpperCAmelCase_ : int = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            UpperCAmelCase_ : Tuple = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
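# Example invocation (flag names mirror the fields of TensorFlowBenchmarkArguments;
# verify with `--help` against your installed version, and treat the script name as
# illustrative):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128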
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'big_bird'
def __init__( self : List[str] , __snake_case : List[Any]=50_358 , __snake_case : Any=768 , __snake_case : List[str]=12 , __snake_case : int=12 , __snake_case : Dict=3_072 , __snake_case : Union[str, Any]="gelu_new" , __snake_case : List[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : Union[str, Any]=4_096 , __snake_case : str=2 , __snake_case : Any=0.02 , __snake_case : Optional[Any]=1E-12 , __snake_case : int=True , __snake_case : Optional[int]=0 , __snake_case : List[str]=1 , __snake_case : Optional[int]=2 , __snake_case : Tuple=66 , __snake_case : Tuple="block_sparse" , __snake_case : Union[str, Any]=True , __snake_case : int=False , __snake_case : str=64 , __snake_case : Optional[Any]=3 , __snake_case : List[str]=None , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , sep_token_id=__snake_case , **__snake_case , )
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : List[Any] = layer_norm_eps
UpperCAmelCase_ : str = use_cache
UpperCAmelCase_ : Union[str, Any] = rescale_embeddings
UpperCAmelCase_ : int = attention_type
UpperCAmelCase_ : str = use_bias
UpperCAmelCase_ : Tuple = block_size
UpperCAmelCase_ : int = num_random_blocks
UpperCAmelCase_ : Union[str, Any] = classifier_dropout
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
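# A sketch of instantiating the config above: `attention_type` switches BigBird between
# full attention ("original_full") and the sparse path ("block_sparse"), which is in
# turn shaped by `block_size` and `num_random_blocks`.
from transformers import BigBirdConfig

sparse_config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
full_config = BigBirdConfig(attention_type="original_full")
print(sparse_config.max_position_embeddings)  # 4096, the long-context default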
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Dict = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(__snake_case )
UpperCAmelCase_ : List[str] = list(__snake_case )
UpperCAmelCase_ : Any = list(__snake_case )
UpperCAmelCase_ : Any = conv_bias
UpperCAmelCase_ : List[str] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[Any] = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : int = num_clusters
UpperCAmelCase_ : int = do_stable_layer_norm
UpperCAmelCase_ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[Any] = mask_time_prob
UpperCAmelCase_ : str = mask_time_length
UpperCAmelCase_ : Any = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : int = num_codevector_groups
UpperCAmelCase_ : List[str] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : List[str] = num_negatives
UpperCAmelCase_ : Any = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Any = ctc_loss_reduction
UpperCAmelCase_ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Union[str, Any] = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
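# The property above (exposed as `inputs_to_logits_ratio` in the upstream API) is the
# product of the feature extractor's convolutional strides, i.e. how many raw audio
# samples are collapsed into one output frame. A quick check with the default strides:
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(functools.reduce(operator.mul, default_conv_stride, 1))  # 320 samples per frame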
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case_ ( __lowercase , __lowercase ):
# Load checkpoint
UpperCAmelCase_ : Tuple = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : Optional[int] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCAmelCase_ : str = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCAmelCase_ : Tuple = v
else:
UpperCAmelCase_ : Union[str, Any] = v
UpperCAmelCase_ : int = chkpt['''params''']
UpperCAmelCase_ : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__lowercase , (torch.FloatTensor, numpy.ndarray) )}
UpperCAmelCase_ : int = chkpt['''dico_word2id''']
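    # XLM's fastBPE vocabulary marks word-continuation pieces with '@@'. The mapping
    # below appends the '</w>' end-of-word marker to whole-word tokens (skipping the
    # first 14 special tokens) and strips '@@' from continuation pieces.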
UpperCAmelCase_ : List[Any] = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 1_3 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(__lowercase , __lowercase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
    print(F'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
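    # Example (script and checkpoint filenames illustrative):
    #   python convert_xlm_checkpoint.py --xlm_checkpoint_path ./mlm_en_2048.pth \
    #       --pytorch_dump_folder_path ./xlm-dump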
def snake_case_ ( __lowercase , __lowercase ):
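    # AND outputs 1 exactly when neither input is 0, so counting the zeros in the
    # pair and requiring that count to be 0 reproduces the truth table.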
return int((input_a, input_a).count(0 ) == 0 )
def snake_case_ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Optional[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = ['input_ids', 'attention_mask']
A_ : int = TaTokenizer
A_ : List[int] = []
def __init__( self : Union[str, Any] , __snake_case : Tuple=None , __snake_case : List[Any]=None , __snake_case : int="</s>" , __snake_case : List[Any]="<unk>" , __snake_case : Dict="<pad>" , __snake_case : Tuple=100 , __snake_case : int=None , **__snake_case : Any , ):
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ : Optional[int] = [f'''<extra_id_{i}>''' for i in range(__snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ : Any = len(set(filter(lambda __snake_case : bool('''extra_id_''' in str(__snake_case ) ) , __snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
__snake_case , tokenizer_file=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : List[str] = False if not self.vocab_file else True
UpperCAmelCase_ : Union[str, Any] = extra_ids
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ : str = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __snake_case , )
return max_model_length
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : str = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def _lowerCamelCase ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_b is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ : int = token_ids_b + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_b
def _lowerCamelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : int = [self.eos_token_id]
if token_ids_b is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return list(
set(filter(lambda __snake_case : bool(re.search(R'''<extra_id_\d+>''' , __snake_case ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [self.convert_tokens_to_ids(__snake_case ) for token in self.get_sentinel_tokens()]
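# Usage sketch for the fast tokenizer above (it is referred to as
# `TaTokenizerFast` inside the deprecation helper; 't5-small' is one of the
# checkpoints listed in PRETRAINED_VOCAB_FILES_MAP):
#
#   tokenizer = TaTokenizerFast.from_pretrained('t5-small')
#   ids = tokenizer('translate English to German: hello').input_ids
#   assert ids[-1] == tokenizer.eos_token_id   # the special-tokens builder above appends </s>
#   sentinels = tokenizer.get_sentinel_tokens()  # e.g. '<extra_id_0>' ... '<extra_id_99>' (order not guaranteed)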
| 641
| 1
|
def or_gate(input_1: int, input_2: int) -> int:
    # An OR gate outputs 1 when at least one input is 1.
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    test_or_gate()
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
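# Composition sketch: other gates follow by negating or combining outputs. The
# `nor_gate` below is hypothetical, not part of the snippet above:
#
#   def nor_gate(input_1: int, input_2: int) -> int:
#       return int(not or_gate(input_1, input_2))
#
#   assert nor_gate(0, 0) == 1 and nor_gate(1, 1) == 0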
| 641
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
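# Behavior sketch: with sys.modules patched above, heavy imports are deferred --
#
#   from transformers.models.x_clip import XCLIPConfig   # cheap, configuration only
#   from transformers.models.x_clip import XCLIPModel    # first access triggers the
#                                                        # real import of modeling_x_clip
#
# If torch is unavailable, only the modeling entries are absent from
# _import_structure; the config and processor classes stay importable, which is
# exactly what the try/except around is_torch_available() encodes.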
| 641
| 1
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCamelCase : Tuple = logging.get_logger(__name__)
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None ):
# Recurse if needed
if "." in tensor_name:
UpperCAmelCase_ : List[str] = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCAmelCase_ : Dict = getattr(__lowercase , __lowercase )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
UpperCAmelCase_ : List[Any] = new_module
UpperCAmelCase_ : Tuple = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
UpperCAmelCase_ : Optional[Any] = tensor_name in module._buffers
UpperCAmelCase_ : Optional[Any] = getattr(__lowercase , __lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : Any = False
if is_buffer or not is_bitsandbytes_available():
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Tuple = False
else:
UpperCAmelCase_ : List[Any] = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
UpperCAmelCase_ : List[Any] = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
if is_8bit or is_4bit:
UpperCAmelCase_ : Optional[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCAmelCase_ : Optional[Any] = old_value.to(__lowercase )
elif isinstance(__lowercase , torch.Tensor ):
UpperCAmelCase_ : Optional[int] = value.to('''cpu''' )
if value.dtype == torch.int8:
UpperCAmelCase_ : Dict = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_8bit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCAmelCase_ : Dict = torch.tensor(__lowercase , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
UpperCAmelCase_ : Optional[Any] = new_value.T
UpperCAmelCase_ : Dict = old_value.__dict__
if is_8bit:
UpperCAmelCase_ : Dict = bnb.nn.Int8Params(__lowercase , requires_grad=__lowercase , **__lowercase ).to(__lowercase )
elif is_4bit:
UpperCAmelCase_ : Optional[Any] = bnb.nn.Params4bit(__lowercase , requires_grad=__lowercase , **__lowercase ).to(__lowercase )
UpperCAmelCase_ : List[str] = new_value
if fp16_statistics is not None:
setattr(module.weight , '''SCB''' , fp16_statistics.to(__lowercase ) )
else:
if value is None:
UpperCAmelCase_ : Any = old_value.to(__lowercase )
elif isinstance(__lowercase , torch.Tensor ):
UpperCAmelCase_ : int = value.to(__lowercase )
else:
UpperCAmelCase_ : Any = torch.tensor(__lowercase , device=__lowercase )
if is_buffer:
UpperCAmelCase_ : List[str] = new_value
else:
UpperCAmelCase_ : Union[str, Any] = nn.Parameter(__lowercase , requires_grad=old_value.requires_grad )
UpperCAmelCase_ : Union[str, Any] = new_value
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=False ):
for name, module in model.named_children():
if current_key_name is None:
UpperCAmelCase_ : int = []
current_key_name.append(__lowercase )
if (isinstance(__lowercase , nn.Linear ) or isinstance(__lowercase , Conv1D )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(__lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__lowercase , Conv1D ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = module.weight.shape
else:
UpperCAmelCase_ : Any = module.in_features
UpperCAmelCase_ : Any = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCAmelCase_ : Tuple = bnb.nn.Linear8bitLt(
__lowercase , __lowercase , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
UpperCAmelCase_ : Optional[int] = True
else:
if (
quantization_config.llm_int8_skip_modules is not None
and name in quantization_config.llm_int8_skip_modules
):
pass
else:
UpperCAmelCase_ : Any = bnb.nn.Linear4bit(
__lowercase , __lowercase , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
UpperCAmelCase_ : List[Any] = True
# Store the module class in case we need to transpose the weight later
UpperCAmelCase_ : List[str] = type(__lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__lowercase )
if len(list(module.children() ) ) > 0:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = _replace_with_bnb_linear(
__lowercase , __lowercase , __lowercase , __lowercase , has_been_replaced=__lowercase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=None , __lowercase=None ):
UpperCAmelCase_ : Any = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = _replace_with_bnb_linear(
__lowercase , __lowercase , __lowercase , __lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def snake_case_ ( *__lowercase , **__lowercase ):
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , __lowercase , )
return replace_with_bnb_linear(*__lowercase , **__lowercase )
def snake_case_ ( *__lowercase , **__lowercase ):
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , __lowercase , )
return set_module_quantized_tensor_to_device(*__lowercase , **__lowercase )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : Tuple = deepcopy(__lowercase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
UpperCAmelCase_ : List[Any] = find_tied_parameters(__lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCAmelCase_ : str = sum(__lowercase , [] )
UpperCAmelCase_ : List[Any] = len(__lowercase ) > 0
# Check if it is a base model
UpperCAmelCase_ : str = not hasattr(__lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCAmelCase_ : Optional[int] = list(model.named_children() )
UpperCAmelCase_ : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCAmelCase_ : List[Any] = set(__lowercase ) - set(__lowercase )
UpperCAmelCase_ : List[str] = list(set(__lowercase ) ) + list(__lowercase )
# remove ".weight" from the keys
UpperCAmelCase_ : int = ['''.weight''', '''.bias''']
UpperCAmelCase_ : int = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCAmelCase_ : int = name.replace(__lowercase , '''''' )
filtered_module_names.append(__lowercase )
return filtered_module_names
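# Usage sketch (BitsAndBytesConfig is the public transformers entry point that
# drives these helpers; the model name and CUDA availability are assumptions):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   model = AutoModelForCausalLM.from_pretrained(
#       'facebook/opt-350m',
#       quantization_config=BitsAndBytesConfig(load_in_8bit=True),
#   )
#
# During loading, replace_with_bnb_linear swaps each eligible nn.Linear/Conv1D
# for bnb.nn.Linear8bitLt (or Linear4bit in the 4-bit path), and the
# tied-weights helper above computes the module names (by default the lm_head)
# to keep in full precision.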
| 641
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['image_processor', 'tokenizer']
A_ : int = 'LayoutLMv2ImageProcessor'
A_ : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : List[str]=None , **__snake_case : Optional[int] ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
UpperCAmelCase_ : List[Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase_ : Tuple = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Any = features['''words''']
UpperCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
UpperCAmelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[int] = self.get_overflowing_images(__snake_case , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase_ : List[Any] = images
return encoded_inputs
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase_ : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__snake_case )} and {len(__snake_case )}''' )
return images_with_overflow
def _lowerCamelCase ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : str , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
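# Usage sketch (the class above corresponds to LayoutXLMProcessor; the model
# identifier is an example, and apply_ocr=True additionally requires pytesseract
# plus a Tesseract install):
#
#   from PIL import Image
#   processor = LayoutXLMProcessor.from_pretrained('microsoft/layoutxlm-base')
#   encoding = processor(Image.open('invoice.png').convert('RGB'), return_tensors='pt')
#   # encoding contains input_ids, bbox, attention_mask and the resized image,
#   # matching the model_input_names property above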
| 641
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : int = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = 'mctct'
def __init__( self : List[str] , __snake_case : Tuple=8_065 , __snake_case : List[str]=1_536 , __snake_case : List[Any]=36 , __snake_case : List[str]=6_144 , __snake_case : Any=4 , __snake_case : List[Any]=384 , __snake_case : str=920 , __snake_case : List[str]=1E-5 , __snake_case : Tuple=0.3 , __snake_case : Tuple="relu" , __snake_case : Any=0.02 , __snake_case : str=0.3 , __snake_case : Optional[int]=0.3 , __snake_case : Dict=1 , __snake_case : Dict=0 , __snake_case : int=2 , __snake_case : str=1 , __snake_case : Dict=0.3 , __snake_case : Dict=1 , __snake_case : List[Any]=(7,) , __snake_case : int=(3,) , __snake_case : List[Any]=80 , __snake_case : Optional[Any]=1 , __snake_case : List[Any]=None , __snake_case : Optional[int]="sum" , __snake_case : Tuple=False , **__snake_case : Any , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Tuple = attention_head_dim
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : List[Any] = layer_norm_eps
UpperCAmelCase_ : Optional[Any] = layerdrop
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Dict = attention_probs_dropout_prob
UpperCAmelCase_ : str = pad_token_id
UpperCAmelCase_ : Optional[int] = bos_token_id
UpperCAmelCase_ : Optional[int] = eos_token_id
UpperCAmelCase_ : Optional[int] = conv_glu_dim
UpperCAmelCase_ : Union[str, Any] = conv_dropout
UpperCAmelCase_ : List[str] = num_conv_layers
UpperCAmelCase_ : List[Any] = input_feat_per_channel
UpperCAmelCase_ : List[str] = input_channels
UpperCAmelCase_ : Union[str, Any] = conv_channels
UpperCAmelCase_ : Dict = ctc_loss_reduction
UpperCAmelCase_ : Optional[int] = ctc_zero_infinity
# prevents config testing from failing when exporting to JSON
UpperCAmelCase_ : str = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
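# Instantiation sketch (the class corresponds to MCTCTConfig; values below are
# the defaults visible in __init__, which give a single conv layer so the
# kernel/stride length check passes):
#
#   config = MCTCTConfig()
#   assert config.num_conv_layers == 1 and list(config.conv_kernel) == [7]
#   config.to_json_string()  # works because conv_kernel/conv_stride were cast to lists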
| 641
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : int
A_ : int
A_ : float
A_ : float
A_ : Tuple[int]
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = torch.arange(self.height * self.width )
UpperCAmelCase_ : Any = torch.stack(
[
pixel_indices % self.width,
torch.div(__snake_case , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ : List[Any] = self.shape
UpperCAmelCase_ : Optional[Any] = int(np.prod(__snake_case ) )
UpperCAmelCase_ : str = self.get_image_coords()
UpperCAmelCase_ : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Tuple = self.get_camera_rays(__snake_case )
UpperCAmelCase_ : Union[str, Any] = rays.view(__snake_case , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def _lowerCamelCase ( self : Dict , __snake_case : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : int = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : str = coords.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = self.resolution()
UpperCAmelCase_ : Optional[Any] = self.fov()
UpperCAmelCase_ : int = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[Any] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Optional[Any] = fracs.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(__snake_case , 1 , 3 )
+ self.x.view(__snake_case , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__snake_case , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=__snake_case )
UpperCAmelCase_ : Optional[int] = torch.stack(
[
torch.broadcast_to(self.origin.view(__snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__snake_case , *__snake_case , 2 , 3 )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
UpperCAmelCase_ : Tuple = np.array([np.sin(__lowercase ), np.cos(__lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : str = -z * 4
UpperCAmelCase_ : List[Any] = np.array([np.cos(__lowercase ), -np.sin(__lowercase ), 0.0] )
UpperCAmelCase_ : Tuple = np.cross(__lowercase , __lowercase )
origins.append(__lowercase )
xs.append(__lowercase )
ys.append(__lowercase )
zs.append(__lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , width=__lowercase , height=__lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowercase )) , )
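# Usage sketch: the builder above pans 20 cameras around the origin, with
# `size` setting both width and height. Both names below are assumptions
# consistent with the snippet (the obfuscated function and @property names):
#
#   cameras = create_pan_cameras(64)   # assumed name of the builder function
#   rays = cameras.camera_rays         # assumed name of the ray property above
#   # rays.shape == torch.Size([1, 20 * 64 * 64, 2, 3]); the last two dims hold
#   # an (origin, direction) pair per pixel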
| 641
| 1
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def snake_case_ ( __lowercase , __lowercase ):
assert isinstance(__lowercase , __lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : List[str] = tmp_path / '''cache'''
UpperCAmelCase_ : List[str] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : Tuple = TextDatasetReader(__lowercase , cache_dir=__lowercase , keep_in_memory=__lowercase ).read()
_check_text_dataset(__lowercase , __lowercase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : List[str] = tmp_path / '''cache'''
UpperCAmelCase_ : List[str] = {'''text''': '''string'''}
UpperCAmelCase_ : str = features.copy() if features else default_expected_features
UpperCAmelCase_ : Optional[int] = (
Features({feature: Value(__lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : List[str] = TextDatasetReader(__lowercase , features=__lowercase , cache_dir=__lowercase ).read()
_check_text_dataset(__lowercase , __lowercase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Tuple = tmp_path / '''cache'''
UpperCAmelCase_ : str = {'''text''': '''string'''}
UpperCAmelCase_ : int = TextDatasetReader(__lowercase , cache_dir=__lowercase , split=__lowercase ).read()
_check_text_dataset(__lowercase , __lowercase )
assert dataset.split == split if split else dataset.split == "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
if issubclass(__lowercase , __lowercase ):
UpperCAmelCase_ : int = text_path
elif issubclass(__lowercase , __lowercase ):
UpperCAmelCase_ : Any = [text_path]
UpperCAmelCase_ : str = tmp_path / '''cache'''
UpperCAmelCase_ : Dict = {'''text''': '''string'''}
UpperCAmelCase_ : Dict = TextDatasetReader(__lowercase , cache_dir=__lowercase ).read()
_check_text_dataset(__lowercase , __lowercase )
def snake_case_ ( __lowercase , __lowercase , __lowercase=("train",) ):
assert isinstance(__lowercase , __lowercase )
for split in splits:
UpperCAmelCase_ : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : str = tmp_path / '''cache'''
UpperCAmelCase_ : Tuple = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : Any = TextDatasetReader({'''train''': text_path} , cache_dir=__lowercase , keep_in_memory=__lowercase ).read()
_check_text_datasetdict(__lowercase , __lowercase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = tmp_path / '''cache'''
UpperCAmelCase_ : List[str] = {'''text''': '''string'''}
UpperCAmelCase_ : Union[str, Any] = features.copy() if features else default_expected_features
UpperCAmelCase_ : Optional[Any] = (
Features({feature: Value(__lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : str = TextDatasetReader({'''train''': text_path} , features=__lowercase , cache_dir=__lowercase ).read()
_check_text_datasetdict(__lowercase , __lowercase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
if split:
UpperCAmelCase_ : List[Any] = {split: text_path}
else:
UpperCAmelCase_ : List[Any] = '''train'''
UpperCAmelCase_ : Any = {'''train''': text_path, '''test''': text_path}
UpperCAmelCase_ : Optional[Any] = tmp_path / '''cache'''
UpperCAmelCase_ : Dict = {'''text''': '''string'''}
UpperCAmelCase_ : Optional[int] = TextDatasetReader(__lowercase , cache_dir=__lowercase ).read()
_check_text_datasetdict(__lowercase , __lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
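# Fixture sketch: the `text_path` argument used throughout is a pytest fixture
# pointing at a plain text file. The exact content below is an assumption, but
# any four-line file satisfies the num_rows == 4 asserts above:
#
#   @pytest.fixture
#   def text_path(tmp_path_factory):
#       path = tmp_path_factory.mktemp('data') / 'dataset.txt'
#       path.write_text('line 0\nline 1\nline 2\nline 3\n')
#       return str(path)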
| 641
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCamelCase : Dict = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'token-classification'
def __init__( self : Any , __snake_case : Optional[Any] ):
'''simple docstring'''
if type(__snake_case ) == dict:
UpperCAmelCase_ : Tuple = Namespace(**__snake_case )
UpperCAmelCase_ : Dict = import_module('''tasks''' )
try:
UpperCAmelCase_ : int = getattr(__snake_case , hparams.task_type )
UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCAmelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase_ : Dict = CrossEntropyLoss().ignore_index
super().__init__(__snake_case , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK (this will not be needed for much longer)
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
'''f1''': f1_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
# when stable
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
# Add NER specific options
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__snake_case , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__snake_case , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Optional[Any] = NERTransformer(args)
__UpperCamelCase : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
__UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
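# Invocation sketch (assuming this file is saved as run_ner.py; the data
# directory, labels file and model choice are placeholders, and the generic
# flags come from add_generic_args / BaseTransformer):
#
#   python run_ner.py --data_dir ./conll2003 --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --task_type NER \
#       --max_seq_length 128 --output_dir ./ner_out --do_train --do_predict --gpus 1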
| 641
| 1
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__snake_case , '''width_multiplier''' ) )
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : int=13 , __snake_case : Tuple=64 , __snake_case : str=2 , __snake_case : Dict=3 , __snake_case : List[Any]="swish" , __snake_case : Optional[int]=3 , __snake_case : Any=32 , __snake_case : Union[str, Any]=0.1 , __snake_case : Optional[int]=0.02 , __snake_case : str=True , __snake_case : Dict=True , __snake_case : Optional[Any]=10 , __snake_case : str=None , __snake_case : Tuple=0.25 , __snake_case : Dict=0.0 , __snake_case : Any=0.0 , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : List[str] = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : List[Any] = make_divisible(512 * width_multiplier , divisor=8 )
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Union[str, Any] = conv_kernel_size
UpperCAmelCase_ : Optional[int] = output_stride
UpperCAmelCase_ : Optional[int] = classifier_dropout_prob
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : Optional[int] = width_multiplier
UpperCAmelCase_ : int = ffn_dropout
UpperCAmelCase_ : Dict = attn_dropout
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : List[str] = None
if self.use_labels:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def _lowerCamelCase ( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = MobileViTVaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : str = MobileViTVaForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : str , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.num_labels
UpperCAmelCase_ : int = MobileViTVaForSemanticSegmentation(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_ : Optional[Any] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[str] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = False
A_ : List[str] = False
A_ : List[str] = False
A_ : List[Any] = False
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = MobileViTVaModelTester(self )
UpperCAmelCase_ : int = MobileViTVaConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : str = model_class(__snake_case )
UpperCAmelCase_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Dict = [*signature.parameters.keys()]
UpperCAmelCase_ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(__snake_case : Optional[int] , __snake_case : Any , __snake_case : Union[str, Any] ):
UpperCAmelCase_ : Optional[int] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
UpperCAmelCase_ : str = outputs.hidden_states
UpperCAmelCase_ : Union[str, Any] = 5
self.assertEqual(len(__snake_case ) , __snake_case )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase_ : List[str] = 2
for i in range(len(__snake_case ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__snake_case )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = MobileViTVaModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def snake_case_ ( ):
UpperCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
__snake_case )
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : Tuple = prepare_img()
UpperCAmelCase_ : Optional[int] = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**__snake_case )
# verify the logits
UpperCAmelCase_ : List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __snake_case )
UpperCAmelCase_ : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase_ : Union[str, Any] = model.to(__snake_case )
UpperCAmelCase_ : List[str] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase_ : List[Any] = prepare_img()
UpperCAmelCase_ : Any = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**__snake_case )
UpperCAmelCase_ : Dict = outputs.logits
# verify the logits
UpperCAmelCase_ : Optional[Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=__snake_case , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase_ : Optional[int] = model.to(__snake_case )
UpperCAmelCase_ : str = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
UpperCAmelCase_ : Optional[int] = prepare_img()
UpperCAmelCase_ : Tuple = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : str = model(**__snake_case )
UpperCAmelCase_ : Any = outputs.logits.detach().cpu()
UpperCAmelCase_ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__snake_case , target_sizes=[(50, 60)] )
UpperCAmelCase_ : int = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __snake_case )
UpperCAmelCase_ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__snake_case )
UpperCAmelCase_ : str = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __snake_case )
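# Inference sketch mirroring the first integration test above (class and
# checkpoint names are taken from the imports and tests; weights are downloaded
# on first use):
#
#   from PIL import Image
#   processor = MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
#   model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
#   inputs = processor(images=Image.open('cats.png'), return_tensors='pt')
#   logits = model(**inputs).logits   # shape (1, 1000), ImageNet classes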
| 641
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'encoder-decoder'
A_ : Optional[int] = True
def __init__( self : Dict , **__snake_case : Union[str, Any] ):
'''simple docstring'''
super().__init__(**__snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCAmelCase_ : int = kwargs.pop('''encoder''' )
UpperCAmelCase_ : List[Any] = encoder_config.pop('''model_type''' )
UpperCAmelCase_ : int = kwargs.pop('''decoder''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Optional[int] = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : List[Any] = True
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : Union[str, Any] ):
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Tuple = self.encoder.to_dict()
UpperCAmelCase_ : Tuple = self.decoder.to_dict()
UpperCAmelCase_ : Tuple = self.__class__.model_type
return output
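# Rough usage sketch (checkpoint names assumed, not taken from this file):
#   enc = AutoConfig.from_pretrained("bert-base-uncased")
#   dec = AutoConfig.from_pretrained("gpt2")
#   cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)  # flips is_decoder / add_cross_attention on the decoder
#   cfg.to_dict()  # round-trips both sub-configs plus the composite model_type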
| 641
| 1
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : Optional[int]=13 , __snake_case : Optional[int]=7 , __snake_case : Union[str, Any]=True , __snake_case : str=True , __snake_case : List[Any]=True , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=99 , __snake_case : Tuple=32 , __snake_case : str=2 , __snake_case : str=4 , __snake_case : Union[str, Any]=37 , __snake_case : Dict="gelu" , __snake_case : str=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Any=512 , __snake_case : Any=16 , __snake_case : str=2 , __snake_case : List[str]=0.02 , __snake_case : Dict=3 , __snake_case : Tuple=4 , __snake_case : str=None , ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Dict = 13
UpperCAmelCase_ : List[Any] = 7
UpperCAmelCase_ : str = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Union[str, Any] = 99
UpperCAmelCase_ : Dict = 32
UpperCAmelCase_ : Tuple = 2
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : int = 37
UpperCAmelCase_ : Optional[Any] = '''gelu'''
UpperCAmelCase_ : List[str] = 0.1
UpperCAmelCase_ : Dict = 0.1
UpperCAmelCase_ : int = 512
UpperCAmelCase_ : Tuple = 16
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : Optional[Any] = 0.02
UpperCAmelCase_ : List[Any] = 3
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : Any = None
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Any = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Dict = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__snake_case , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : List[str] , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = TFRoFormerModel(config=__snake_case )
UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase_ : Dict = [input_ids, input_mask]
UpperCAmelCase_ : Optional[Any] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = TFRoFormerForCausalLM(config=__snake_case )
UpperCAmelCase_ : List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : Dict = model(__snake_case )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _lowerCamelCase ( self : str , __snake_case : Tuple , __snake_case : Any , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = TFRoFormerForMaskedLM(config=__snake_case )
UpperCAmelCase_ : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : Tuple = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : Any = TFRoFormerForSequenceClassification(config=__snake_case )
UpperCAmelCase_ : Optional[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : int = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : int , __snake_case : int , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.num_choices
UpperCAmelCase_ : Optional[Any] = TFRoFormerForMultipleChoice(config=__snake_case )
UpperCAmelCase_ : Union[str, Any] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : str = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : int = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : List[str] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase_ : Tuple = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : int , __snake_case : Any , __snake_case : int , __snake_case : str , __snake_case : List[str] , __snake_case : Dict , __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : str = TFRoFormerForTokenClassification(config=__snake_case )
UpperCAmelCase_ : Union[str, Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : List[str] = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Dict , __snake_case : Dict , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = TFRoFormerForQuestionAnswering(config=__snake_case )
UpperCAmelCase_ : Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : Dict = model(__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = config_and_inputs
UpperCAmelCase_ : Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Tuple = False
A_ : List[str] = False
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : str ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = TFRoFormerModelTester(self )
UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__snake_case )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
UpperCAmelCase_ : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase_ : Dict = model(__snake_case )[0]
# TODO Replace vocab size
UpperCAmelCase_ : List[Any] = 50_000
UpperCAmelCase_ : Tuple = [1, 6, vocab_size]
self.assertEqual(output.shape , __snake_case )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
UpperCAmelCase_ : Optional[Any] = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __snake_case , atol=1E-4 )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = 1E-4
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tf.constant([[4, 10]] )
UpperCAmelCase_ : List[str] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
UpperCAmelCase_ : Any = emba(input_ids.shape )
UpperCAmelCase_ : Optional[int] = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(__snake_case , __snake_case , atol=self.tolerance )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
UpperCAmelCase_ : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
UpperCAmelCase_ : str = emba.weight[:3, :5]
tf.debugging.assert_near(__snake_case , __snake_case , atol=self.tolerance )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = 1E-4
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
# 2,12,16,64
UpperCAmelCase_ : str = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCAmelCase_ : Union[str, Any] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCAmelCase_ : Dict = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
UpperCAmelCase_ : Optional[int] = embed_positions([2, 16, 768] )[None, None, :, :]
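        # RoPE: rotate each (even, odd) feature pair of the query and key tensors by a
        # position-dependent angle from the sinusoidal table, so attention scores depend
        # on relative rather than absolute positions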
UpperCAmelCase_ , UpperCAmelCase_ : Dict = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__snake_case , __snake_case , __snake_case )
UpperCAmelCase_ : Tuple = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
UpperCAmelCase_ : Any = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __snake_case , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __snake_case , atol=self.tolerance )
| 641
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
UpperCAmelCase_ : int = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase_ : Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
return image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = dct.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
def snake_case_ ( __lowercase , __lowercase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
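        # the key bias is omitted from the original checkpoint because it is identically zero,
        # so the fused qkv bias is assembled as [q_bias, zeros, v_bias]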
UpperCAmelCase_ : int = torch.cat((q_bias, torch.zeros_like(__lowercase , requires_grad=__lowercase ), v_bias) )
UpperCAmelCase_ : List[str] = qkv_bias
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4
UpperCAmelCase_ : Any = BlipaVisionConfig(image_size=__lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : List[Any] = BlipaConfig(vision_config=__lowercase , text_config=__lowercase )
return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641
| 1
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 't5'
A_ : Optional[Any] = ['past_key_values']
A_ : str = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : str , __snake_case : List[str]=32_128 , __snake_case : Optional[int]=512 , __snake_case : Dict=64 , __snake_case : Union[str, Any]=2_048 , __snake_case : List[str]=6 , __snake_case : Optional[Any]=None , __snake_case : Any=8 , __snake_case : str=32 , __snake_case : List[str]=128 , __snake_case : List[str]=0.1 , __snake_case : Optional[int]=1E-6 , __snake_case : List[Any]=1.0 , __snake_case : Tuple="relu" , __snake_case : str=True , __snake_case : int=True , __snake_case : Optional[Any]=0 , __snake_case : int=1 , **__snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Tuple = d_model
UpperCAmelCase_ : int = d_kv
UpperCAmelCase_ : str = d_ff
UpperCAmelCase_ : str = num_layers
UpperCAmelCase_ : str = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase_ : List[str] = num_heads
UpperCAmelCase_ : Optional[int] = relative_attention_num_buckets
UpperCAmelCase_ : int = relative_attention_max_distance
UpperCAmelCase_ : List[Any] = dropout_rate
UpperCAmelCase_ : Optional[Any] = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_factor
UpperCAmelCase_ : List[str] = feed_forward_proj
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : Any = self.feed_forward_proj.split('''-''' )
UpperCAmelCase_ : Dict = act_info[-1]
UpperCAmelCase_ : int = act_info[0] == '''gated'''
if len(__snake_case ) > 1 and act_info[0] != "gated" or len(__snake_case ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCAmelCase_ : int = '''gelu_new'''
super().__init__(
pad_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , **__snake_case , )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Dict = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
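        # with cached past key values the decoder receives one new token per step, so its
        # sequence axis disappears and the attention mask must span past + current tokens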
if self.use_past:
UpperCAmelCase_ : List[Any] = '''past_encoder_sequence + sequence'''
UpperCAmelCase_ : Dict = {0: '''batch'''}
UpperCAmelCase_ : Union[str, Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
UpperCAmelCase_ : List[str] = {0: '''batch''', 1: '''decoder_sequence'''}
UpperCAmelCase_ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction='''inputs''' )
return common_inputs
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return 13
| 641
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
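    # run the untrusted program in a child process so it can be hard-killed on timeout;
    # the manager-backed list survives the kill and carries the verdict back to the parent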
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
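    # POSIX-only: ITIMER_REAL delivers SIGALRM once the budget elapses, and the handler
    # converts it into a TimeoutException; the timer is disarmed again in the finally block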
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def snake_case_ ( ):
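    # silence the program under test: stdout/stderr are redirected into a write-only
    # buffer and any attempt to read stdin raises OSError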
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
pass
class lowerCAmelCase__( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class lowerCAmelCase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
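    # best-effort sandbox (adapted from OpenAI's human-eval): cap the address-space, data
    # and stack limits, then null out destructive builtins, os/shutil/subprocess entry
    # points and other escape hatches before executing untrusted code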
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
| 641
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Any = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
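    # replace this module in sys.modules with a lazy proxy so the heavy torch/tf
    # submodules are only imported when one of their attributes is actually accessed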
__UpperCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : int = 'falcon'
A_ : int = ['past_key_values']
def __init__( self : Optional[Any] , __snake_case : Tuple=65_024 , __snake_case : List[str]=4_544 , __snake_case : Optional[Any]=32 , __snake_case : Any=71 , __snake_case : str=1E-5 , __snake_case : List[str]=0.02 , __snake_case : List[Any]=True , __snake_case : Dict=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=None , __snake_case : List[Any]=False , __snake_case : Dict=False , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=False , __snake_case : Dict=11 , __snake_case : List[str]=11 , **__snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : int = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''n_embed''' , __snake_case )
UpperCAmelCase_ : str = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Tuple = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Optional[int] = alibi
UpperCAmelCase_ : Dict = new_decoder_architecture
UpperCAmelCase_ : List[Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : Tuple = parallel_attn
UpperCAmelCase_ : List[Any] = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return not self.alibi
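# Sanity check on the defaults above (a sketch, not part of the original file):
# hidden_size 4544 over 71 heads gives a head dim of 4544 // 71 == 64, and the
# `rotary` property is True exactly when alibi position biases are disabled.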
| 641
| 1
|
from sklearn.metrics import fa_score
import datasets
__UpperCamelCase : Dict = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
__UpperCamelCase : int = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'
__UpperCamelCase : List[str] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def _lowerCamelCase ( self : int , __snake_case : Any , __snake_case : List[str] , __snake_case : Any=None , __snake_case : int=1 , __snake_case : Any="binary" , __snake_case : int=None ):
'''simple docstring'''
UpperCAmelCase_ : Dict = fa_score(
__snake_case , __snake_case , labels=__snake_case , pos_label=__snake_case , average=__snake_case , sample_weight=__snake_case )
return {"f1": float(__snake_case ) if score.size == 1 else score}
| 641
|
def snake_case_ ( __lowercase ):
return " ".join(
''''''.join(word[::-1] ) if len(__lowercase ) > 4 else word for word in sentence.split() )
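# e.g. "Hey wollef sroirraw" -> "Hey fellow warriors": only words longer than four characters are reversed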
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 641
| 1
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Tuple = 'sequence-classification'
def __init__( self : Optional[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
if type(__snake_case ) == dict:
UpperCAmelCase_ : Dict = Namespace(**__snake_case )
UpperCAmelCase_ : Union[str, Any] = glue_output_modes[hparams.task]
UpperCAmelCase_ : Dict = glue_tasks_num_labels[hparams.task]
super().__init__(__snake_case , __snake_case , self.mode )
def _lowerCamelCase ( self : List[Any] , **__snake_case : Any ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : Any , __snake_case : str , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCAmelCase_ : Optional[Any] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
UpperCAmelCase_ : Optional[int] = self(**__snake_case )
UpperCAmelCase_ : int = outputs[0]
UpperCAmelCase_ : Dict = self.trainer.lr_schedulers[0]['''scheduler''']
UpperCAmelCase_ : Optional[Any] = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.hparams
UpperCAmelCase_ : Dict = processors[args.task]()
UpperCAmelCase_ : int = processor.get_labels()
for mode in ["train", "dev"]:
UpperCAmelCase_ : str = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : List[str] = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
UpperCAmelCase_ : List[str] = convert_examples_to_features(
__snake_case , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : str , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : Any = '''dev''' if mode == '''test''' else mode
UpperCAmelCase_ : Any = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Dict = torch.load(__snake_case )
UpperCAmelCase_ : List[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : List[str] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
UpperCAmelCase_ : List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
UpperCAmelCase_ : List[Any] = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case , shuffle=__snake_case , )
def _lowerCamelCase ( self : Dict , __snake_case : Union[str, Any] , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCAmelCase_ : Any = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
UpperCAmelCase_ : Tuple = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : str = outputs[:2]
UpperCAmelCase_ : List[Any] = logits.detach().cpu().numpy()
UpperCAmelCase_ : Union[str, Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : str = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
UpperCAmelCase_ : Optional[Any] = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=1 )
elif self.hparams.glue_output_mode == "regression":
UpperCAmelCase_ : Union[str, Any] = np.squeeze(__snake_case )
UpperCAmelCase_ : Optional[int] = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : str = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : str = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Union[str, Any] = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __snake_case , __snake_case )}
UpperCAmelCase_ : List[str] = dict(results.items() )
UpperCAmelCase_ : Optional[int] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[Any] , __snake_case : list ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self._eval_end(__snake_case )
UpperCAmelCase_ : Dict = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = self._eval_end(__snake_case )
UpperCAmelCase_ : List[str] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=__snake_case , required=__snake_case , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def snake_case_ ( ):
UpperCAmelCase_ : Any = argparse.ArgumentParser()
add_generic_args(__lowercase , os.getcwd() )
UpperCAmelCase_ : Tuple = GLUETransformer.add_model_specific_args(__lowercase , os.getcwd() )
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
UpperCAmelCase_ : int = os.path.join(
'''./results''' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
UpperCAmelCase_ : str = GLUETransformer(__lowercase )
UpperCAmelCase_ : str = generic_train(__lowercase , __lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
UpperCAmelCase_ : List[Any] = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=__lowercase ) )
UpperCAmelCase_ : Optional[int] = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowercase )
if __name__ == "__main__":
main()
| 641
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
        tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
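# Side-by-side look (illustrative, separate from the test above) at the two
# padding strategies collate_fn switches between: "longest" pads each batch
# only to its own longest sequence, while "max_length" always pads to 128.
from transformers import AutoTokenizer

_tok = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
_batch = ["short", "a somewhat longer example sentence"]
print(_tok(_batch, padding="longest", return_tensors="pt")["input_ids"].shape)  # varies per batch
print(_tok(_batch, padding="max_length", max_length=128, return_tensors="pt")["input_ids"].shape)  # always (2, 128)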
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
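# Pure-Python sketch of the problem gather_for_metrics() solves: when the
# dataset does not divide evenly across processes, the last shard is padded
# by repeating samples, and those duplicates must be trimmed before metrics
# are computed. No Accelerate API is used here.
def gather_and_trim(shards, total_len):
    flat = [x for shard in shards for x in shard]
    return flat[:total_len]  # drop the duplicated padding samples


_shards = [[0, 1, 2], [3, 4, 4]]  # second "process" padded by repeating sample 4
assert gather_and_trim(_shards, 5) == [0, 1, 2, 3, 4]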
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
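# Quick numpy check of slerp() above: interpolating halfway between two
# orthogonal unit vectors should bisect the angle between them.
_va = np.array([1.0, 0.0])
_vb = np.array([0.0, 1.0])
assert np.allclose(slerp(0.5, _va, _vb), np.array([0.5**0.5, 0.5**0.5]), atol=1e-6)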
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
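# Sanity check for spherical_dist_loss() above: orthogonal unit vectors sit a
# quarter turn apart on the sphere, so the loss equals theta**2 / 2 = pi**2 / 8.
import math

_x = torch.tensor([[1.0, 0.0]])
_y = torch.tensor([[0.0, 1.0]])
assert torch.allclose(spherical_dist_loss(_x, _y), torch.tensor([math.pi**2 / 8]), atol=1e-5)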
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
def __init__( self : List[Any] , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : int=None , __snake_case : str=None , __snake_case : Any=None , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , )
UpperCAmelCase_ : Any = (
feature_extractor.size
if isinstance(feature_extractor.size , __snake_case )
else feature_extractor.size['''shortest_edge''']
)
UpperCAmelCase_ : Optional[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __snake_case )
set_requires_grad(self.clip_model , __snake_case )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.enable_attention_slicing(__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
set_requires_grad(self.vae , __snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
set_requires_grad(self.vae , __snake_case )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
set_requires_grad(self.unet , __snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
set_requires_grad(self.unet , __snake_case )
def _lowerCamelCase ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str ):
'''simple docstring'''
# get the original timestep using init_timestep
UpperCAmelCase_ : Optional[int] = min(int(num_inference_steps * strength ) , __snake_case )
UpperCAmelCase_ : str = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any]=None ):
'''simple docstring'''
if not isinstance(__snake_case , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(__snake_case )}''' )
UpperCAmelCase_ : str = image.to(device=__snake_case , dtype=__snake_case )
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case )
]
UpperCAmelCase_ : str = torch.cat(__snake_case , dim=0 )
else:
UpperCAmelCase_ : Optional[Any] = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase_ : str = 0.18_215 * init_latents
UpperCAmelCase_ : Tuple = init_latents.repeat_interleave(__snake_case , dim=0 )
UpperCAmelCase_ : Optional[int] = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case )
# get latents
UpperCAmelCase_ : Union[str, Any] = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case )
UpperCAmelCase_ : List[Any] = init_latents
return latents
def _lowerCamelCase ( self : str , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.coca_transform(__snake_case ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCAmelCase_ : str = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
UpperCAmelCase_ : Dict = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def _lowerCamelCase ( self : Optional[int] , __snake_case : List[Any] , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.feature_extractor.preprocess(__snake_case )
UpperCAmelCase_ : List[str] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCAmelCase_ : Optional[Any] = self.clip_model.get_image_features(__snake_case )
UpperCAmelCase_ : Union[str, Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )
UpperCAmelCase_ : Union[str, Any] = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _lowerCamelCase ( self : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : int , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = latents.detach().requires_grad_()
UpperCAmelCase_ : List[str] = self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
UpperCAmelCase_ : Union[str, Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCAmelCase_ : Any = self.scheduler.alphas_cumprod[timestep]
UpperCAmelCase_ : Union[str, Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCAmelCase_ : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCAmelCase_ : Optional[int] = torch.sqrt(__snake_case )
UpperCAmelCase_ : List[str] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __snake_case ):
UpperCAmelCase_ : Optional[int] = self.scheduler.sigmas[index]
UpperCAmelCase_ : Tuple = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase_ : Union[str, Any] = 1 / 0.18_215 * sample
UpperCAmelCase_ : Optional[Any] = self.vae.decode(__snake_case ).sample
UpperCAmelCase_ : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ : Optional[Any] = transforms.Resize(self.feature_extractor_size )(__snake_case )
UpperCAmelCase_ : int = self.normalize(__snake_case ).to(latents.dtype )
UpperCAmelCase_ : Optional[int] = self.clip_model.get_image_features(__snake_case )
UpperCAmelCase_ : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )
UpperCAmelCase_ : Optional[int] = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale
UpperCAmelCase_ : Dict = -torch.autograd.grad(__snake_case , __snake_case )[0]
if isinstance(self.scheduler , __snake_case ):
UpperCAmelCase_ : int = latents.detach() + grads * (sigma**2)
UpperCAmelCase_ : List[Any] = noise_pred_original
else:
UpperCAmelCase_ : Optional[int] = noise_pred_original - torch.sqrt(__snake_case ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : List[Any] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 512 , __snake_case : Optional[int] = 512 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 100 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ):
'''simple docstring'''
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(__snake_case , torch.Generator ) and batch_size > 1:
UpperCAmelCase_ : Dict = [generator] + [None] * (batch_size - 1)
UpperCAmelCase_ : Optional[int] = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
UpperCAmelCase_ : Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
UpperCAmelCase_ : Any = ''', '''.join(__snake_case )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__snake_case ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCAmelCase_ : Tuple = self.get_image_description(__snake_case )
if style_prompt is None:
if len(__snake_case ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCAmelCase_ : Optional[Any] = self.get_image_description(__snake_case )
# get prompt text embeddings for content and style
UpperCAmelCase_ : Optional[Any] = self.tokenizer(
__snake_case , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='''pt''' , )
UpperCAmelCase_ : Tuple = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCAmelCase_ : Any = self.tokenizer(
__snake_case , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='''pt''' , )
UpperCAmelCase_ : int = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCAmelCase_ : List[Any] = slerp(__snake_case , __snake_case , __snake_case )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase_ : List[str] = text_embeddings.repeat_interleave(__snake_case , dim=0 )
# set timesteps
UpperCAmelCase_ : Optional[int] = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCAmelCase_ : Optional[Any] = {}
if accepts_offset:
UpperCAmelCase_ : List[str] = 1
self.scheduler.set_timesteps(__snake_case , **__snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.get_timesteps(__snake_case , __snake_case , self.device )
UpperCAmelCase_ : Optional[int] = timesteps[:1].repeat(__snake_case )
# Preprocess image
UpperCAmelCase_ : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case )
UpperCAmelCase_ : int = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
UpperCAmelCase_ : Union[str, Any] = preprocess(__snake_case , __snake_case , __snake_case )
UpperCAmelCase_ : Tuple = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
UpperCAmelCase_ : List[str] = slerp(__snake_case , __snake_case , __snake_case )
if clip_guidance_scale > 0:
UpperCAmelCase_ : Any = self.get_clip_image_embeddings(__snake_case , __snake_case )
UpperCAmelCase_ : Tuple = self.get_clip_image_embeddings(__snake_case , __snake_case )
UpperCAmelCase_ : Union[str, Any] = slerp(
__snake_case , __snake_case , __snake_case )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase_ : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase_ : int = content_text_input.input_ids.shape[-1]
UpperCAmelCase_ : Optional[Any] = self.tokenizer([''''''] , padding='''max_length''' , max_length=__snake_case , return_tensors='''pt''' )
UpperCAmelCase_ : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCAmelCase_ : int = uncond_embeddings.repeat_interleave(__snake_case , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase_ : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase_ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCAmelCase_ : int = torch.randn(__snake_case , generator=__snake_case , device='''cpu''' , dtype=__snake_case ).to(
self.device )
else:
UpperCAmelCase_ : Dict = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCAmelCase_ : Dict = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ : str = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase_ : List[str] = {}
if accepts_eta:
UpperCAmelCase_ : Tuple = eta
# check if the scheduler accepts generator
UpperCAmelCase_ : int = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCAmelCase_ : Dict = generator
with self.progress_bar(total=__snake_case ):
for i, t in enumerate(__snake_case ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : int = self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
UpperCAmelCase_ : Optional[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = noise_pred.chunk(2 )
UpperCAmelCase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCAmelCase_ : Dict = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.cond_fn(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : Union[str, Any] = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase_ : Union[str, Any] = 1 / 0.18_215 * latents
UpperCAmelCase_ : Optional[Any] = self.vae.decode(__snake_case ).sample
UpperCAmelCase_ : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : Dict = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
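# Minimal standalone sketch (not diffusers API) of the classifier-free
# guidance update used in the denoising loop above: the unconditional and
# text-conditioned noise predictions are blended by guidance_scale.
import torch


def cfg_combine(noise_uncond, noise_text, guidance_scale):
    # guidance_scale == 1.0 reduces to the purely conditional prediction
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)


_u, _t = torch.zeros(4), torch.ones(4)
assert torch.allclose(cfg_combine(_u, _t, 7.5), torch.full((4,), 7.5))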
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
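# Toy illustration (PEP 562, not the transformers implementation) of the lazy
# import pattern _LazyModule provides: placed in a package's __init__.py, a
# module-level __getattr__ defers the heavy submodule import until the
# attribute is first touched, keeping `import <package>` cheap.
def __getattr__(name):
    if name == "VivitModel":
        from .modeling_vivit import VivitModel  # imported only on first access

        return VivitModel
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")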
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: "PretrainedConfig",
        task: str = "default",
        patching_specs: "List[PatchingSpec]" = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: "Optional[TensorType]" = None,
    ):
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
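# Shape check (illustrative numbers) for the BLOOM past_key_values layout
# built in generate_dummy_inputs above: keys are laid out as
# (batch * n_head, head_dim, past_seq), values as (batch * n_head, past_seq, head_dim).
import torch

_batch, _n_head, _head_dim, _past_seq = 2, 8, 64, 10
_key = torch.zeros(_batch * _n_head, _head_dim, _past_seq)
_value = torch.zeros(_batch * _n_head, _past_seq, _head_dim)
assert _key.shape == (16, 64, 10) and _value.shape == (16, 10, 64)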
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Contains the full SHA-1 pipeline for a bytestring message."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the message to a multiple of 64 bytes, appending its bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into 80 32-bit words."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the compression loop over all blocks and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()

import doctest

doctest.testmod()
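# Worked example of the 32-bit left rotation at the heart of SHA-1: the high
# bit wraps around to bit 0, and a 4-bit rotation moves the top nibble to the
# bottom.
assert SHA1Hash.rotate(0x80000000, 1) == 0x00000001
assert SHA1Hash.rotate(0x12345678, 4) == 0x23456781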
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
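# Example instantiation of the config class above (assumes the fixed
# TimesformerConfig), overriding the frame count while keeping defaults.
config = TimesformerConfig(num_frames=16)
print(config.hidden_size, config.num_frames)  # 768 16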
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
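# Self-contained check of the mapping logic above, with no image file needed:
# upscaling a 2x2 array to 4x4 simply repeats each source pixel.
_src = np.zeros((2, 2, 3), np.uint8)
_src[0, 1] = _src[1, 0] = 255
_nn = NearestNeighbour(_src, 4, 4)
_nn.process()
assert (_nn.output[0, 2] == _src[0, 1]).all()  # dst x=2 maps back to src x=1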
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
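# Hedged usage note: fire.Fire exposes calculate_rouge_path's parameters as
# CLI flags, so a typical invocation (file names illustrative) looks like:
#   python run_eval.py predictions.txt targets.txt --save_path rouge.json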
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((UpperCAmelCase_) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
A_ : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
pass
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
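# Hedged local sketch (assumes torch + transformers; names taken from the imports
# above): the same generate() path can be exercised without downloading pretrained
# weights by using a tiny, randomly initialized model:
#
#     config = XLMConfig(vocab_size=64, emb_dim=32, n_layers=2, n_heads=2)
#     model = XLMWithLMHeadModel(config).eval()
#     output_ids = model.generate(torch.tensor([[14, 447]]), do_sample=False, max_length=8)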
| 641
| 1
|
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    '''simple docstring'''

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    '''simple docstring'''

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        '''simple docstring'''
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        '''simple docstring'''
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        '''simple docstring'''
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ):
        '''simple docstring'''
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def __len__( self : Dict ):
'''simple docstring'''
return self.config.num_train_timesteps
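# Hedged usage sketch for the inverse scheduler above (requires torch + diffusers;
# random tensors stand in for real UNet epsilon predictions):
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # stand-in for a UNet prediction
        sample = scheduler.step(model_output, int(t), sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])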
| 700
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(__UpperCamelCase)  # RAG config docstring defined above
class RagConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        '''simple docstring'''
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
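# Hedged composition sketch (run with transformers installed, from outside this module):
#
#     from transformers import BartConfig, DPRConfig, RagConfig
#     rag_config = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig())
#     rag_config.n_docs                  # 5
#     rag_config.generator.model_type    # "bart"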
| 641
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ernie'] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
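# Hedged illustration of the lazy-import pattern above (assumes transformers with
# torch installed):
#
#     import transformers.models.ernie as ernie   # cheap: nothing heavy imported yet
#     ernie.ErnieModel                             # first attribute access triggers the
#                                                  # real import via _LazyModule.__getattr__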
| 701
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
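# Hedged CLI sketch (flag names assumed from TensorFlowBenchmarkArguments; script
# name and values illustrative):
#
#     python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128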
| 641
| 0
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
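# Hedged CLI sketch (script and checkpoint paths hypothetical; positional arguments
# match the parser above):
#
#     python convert_mbart_checkpoint.py /path/to/model.pt dumped_mbart \
#         --hf_config facebook/mbart-large-cc25 --finetuned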
| 702
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Dict = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(__snake_case )
UpperCAmelCase_ : List[str] = list(__snake_case )
UpperCAmelCase_ : Any = list(__snake_case )
UpperCAmelCase_ : Any = conv_bias
UpperCAmelCase_ : List[str] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[Any] = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : int = num_clusters
UpperCAmelCase_ : int = do_stable_layer_norm
UpperCAmelCase_ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[Any] = mask_time_prob
UpperCAmelCase_ : str = mask_time_length
UpperCAmelCase_ : Any = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : int = num_codevector_groups
UpperCAmelCase_ : List[str] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : List[str] = num_negatives
UpperCAmelCase_ : Any = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Any = ctc_loss_reduction
UpperCAmelCase_ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Union[str, Any] = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 641
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    '''simple docstring'''

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
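# Hedged usage sketch (run with transformers installed):
#
#     cfg = Swinv2Config(embed_dim=64, depths=[2, 2], num_heads=[2, 4])
#     cfg.hidden_size            # 128 == 64 * 2 ** (len(depths) - 1)
#     cfg.num_attention_heads    # [2, 4], resolved through attribute_map -> num_heads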
| 703
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')

    state_dict = chkpt['model']

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']

    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(config, indent=2) + '\n')

    print(f'Save vocab file to {pytorch_vocab_dump_path}')
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(vocab, indent=2) + '\n')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 641
| 0
|
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float('-inf')
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float('-inf') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f'n must be greater than or equal to 0. Got n = {n}'
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            'Each integral piece of rod must have a corresponding price. '
            f'Got n = {n} but length of prices = {len(prices)}'
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
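# Quick self-check on the classic CLRS price table (the O(2^n) naive recursion and
# the O(n^2) memoized/bottom-up variants must agree; optimal revenue for n = 8 is 22):
if __name__ == "__main__":
    clrs_prices = [1, 5, 8, 9, 10, 17, 17, 20]
    assert bottom_up_cut_rod(8, clrs_prices) == top_down_cut_rod(8, clrs_prices) == 22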
| 704
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = T5Tokenizer
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        '''simple docstring'''
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id_' in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens'
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        '''simple docstring'''
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f'Copy vocab file to {out_vocab_file}')

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        '''simple docstring'''
        return list(
            set(filter(lambda token: bool(re.search(r'<extra_id_\d+>', token)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
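# Hedged usage sketch (downloads "t5-small" unless cached):
#
#     tok = T5TokenizerFast.from_pretrained("t5-small")
#     len(tok.get_sentinel_tokens())                    # 100 "<extra_id_*>" sentinels
#     tok.create_token_type_ids_from_sequences([3, 5])  # [0, 0, 0] (ids + trailing </s>)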
| 641
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=['stage1', 'stage2', 'stage3', 'stage4']
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
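# Each tuple above is (original mmsegmentation key, Hugging Face key); the helper
# below pops the tensor stored under the old key and reinserts it under the new one.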
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = dct.pop(_A )
UpperCAmelCase_ : Dict = val
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Union[str, Any] = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
UpperCAmelCase_ : Union[str, Any] = model_name_to_url[model_name]
UpperCAmelCase_ : Tuple = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' )["state_dict"]
UpperCAmelCase_ : Dict = get_upernet_config(_A )
UpperCAmelCase_ : Dict = UperNetForSemanticSegmentation(_A )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase_ : Optional[int] = state_dict.pop(_A )
if "bn" in key:
UpperCAmelCase_ : Optional[int] = key.replace('''bn''' , '''batch_norm''' )
UpperCAmelCase_ : List[str] = val
# rename keys
UpperCAmelCase_ : int = create_rename_keys(_A )
for src, dest in rename_keys:
rename_key(_A , _A , _A )
model.load_state_dict(_A )
# verify on image
UpperCAmelCase_ : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
UpperCAmelCase_ : List[Any] = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' )
UpperCAmelCase_ : Tuple = SegformerImageProcessor()
UpperCAmelCase_ : List[Any] = processor(_A , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(_A )
if model_name == "upernet-convnext-tiny":
UpperCAmelCase_ : Optional[int] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
UpperCAmelCase_ : str = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
UpperCAmelCase_ : Union[str, Any] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_A )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_A )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
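# Example invocation (script filename and output path are illustrative; the
# checkpoint itself is downloaded from the OpenMMLab URL table above):
# python convert_upernet.py --model_name upernet-convnext-tiny --pytorch_dump_folder_path ./upernet-convnext-tiny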
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : str = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
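# The lazy-module pattern above defers the torch-dependent imports until a model
# attribute is first accessed, so importing just the configs stays lightweight.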
| 641
| 0
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def snake_case_ ( __lowercase ):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = image.size
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
UpperCAmelCase_ : Union[str, Any] = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
UpperCAmelCase_ : List[Any] = np.array(lowerCamelCase_ ).astype(np.floataa ) / 2_5_5.0
UpperCAmelCase_ : Any = image[None].transpose(0 , 3 , 1 , 2 )
UpperCAmelCase_ : Dict = torch.from_numpy(lowerCamelCase_ )
return 2.0 * image - 1.0
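# For example, an RGB image whose sides are not multiples of 32 is first resized to
# the nearest multiples (513x767 -> 512x736) and then mapped from [0, 255] ints to
# a (1, 3, H, W) float tensor in [-1.0, 1.0].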
class lowerCAmelCase__( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : List[Any] , __snake_case : VQModel , __snake_case : UNetaDModel , __snake_case : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ )
@torch.no_grad()
def __call__( self : Optional[Any] , __snake_case : Union[torch.Tensor, PIL.Image.Image] = None , __snake_case : Optional[int] = 1 , __snake_case : Optional[int] = 100 , __snake_case : Optional[float] = 0.0 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , ):
'''simple docstring'''
if isinstance(UpperCAmelCase_ , PIL.Image.Image ):
UpperCAmelCase_ : List[str] = 1
elif isinstance(UpperCAmelCase_ , torch.Tensor ):
UpperCAmelCase_ : Any = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase_ )}''' )
if isinstance(UpperCAmelCase_ , PIL.Image.Image ):
UpperCAmelCase_ : str = preprocess(UpperCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCAmelCase_ : Optional[int] = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCAmelCase_ : int = next(self.unet.parameters() ).dtype
UpperCAmelCase_ : Union[str, Any] = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=UpperCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = image.to(device=self.device , dtype=UpperCAmelCase_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCAmelCase_ , device=self.device )
UpperCAmelCase_ : List[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ : Tuple = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase_ : List[Any] = {}
if accepts_eta:
UpperCAmelCase_ : Tuple = eta
for t in self.progress_bar(UpperCAmelCase_ ):
# concat latents and low resolution image in the channel dimension.
UpperCAmelCase_ : Union[str, Any] = torch.cat([latents, image] , dim=1 )
UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# predict the noise residual
UpperCAmelCase_ : Union[str, Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
# decode the image latents with the VQVAE
UpperCAmelCase_ : Dict = self.vqvae.decode(UpperCAmelCase_ ).sample
UpperCAmelCase_ : List[Any] = torch.clamp(UpperCAmelCase_ , -1.0 , 1.0 )
UpperCAmelCase_ : Dict = image / 2 + 0.5
UpperCAmelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : Optional[int] = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase_ )
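# Minimal usage sketch (class and checkpoint names are illustrative of an LDM
# super-resolution setup; each denoising step conditions on the low-res image via
# the channel concatenation above):
# pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
# upscaled = pipeline(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]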
| 706
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : str = ['image_processor', 'tokenizer']
A_ : int = 'LayoutLMv2ImageProcessor'
A_ : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : List[str]=None , **__snake_case : Optional[int] ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
UpperCAmelCase_ : List[Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , __snake_case : Dict , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __snake_case : Union[List[List[int]], List[List[List[int]]]] = None , __snake_case : Optional[Union[List[int], List[List[int]]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Optional[int] , ):
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase_ : Tuple = self.image_processor(images=__snake_case , return_tensors=__snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase_ : Any = features['''words''']
UpperCAmelCase_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel values
UpperCAmelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase_ : Optional[int] = self.get_overflowing_images(__snake_case , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase_ : List[Any] = images
return encoded_inputs
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase_ : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__snake_case )} and {len(__snake_case )}''' )
return images_with_overflow
def _lowerCamelCase ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def _lowerCamelCase ( self : str , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
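# Minimal usage sketch (checkpoint name is illustrative): with `apply_ocr` enabled
# on the image processor, one call runs OCR and tokenization together:
# processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
# encoding = processor(images=document_image, return_tensors="pt")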
| 641
| 0
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : int , __snake_case : Union[str, Any] , __snake_case : List[str]=13 , __snake_case : List[Any]=7 , __snake_case : List[Any]=True , __snake_case : List[str]=True , __snake_case : str=True , __snake_case : Optional[Any]=True , __snake_case : Union[str, Any]=99 , __snake_case : Union[str, Any]=64 , __snake_case : Dict=32 , __snake_case : Tuple=5 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]=37 , __snake_case : Tuple="gelu" , __snake_case : int=0.1 , __snake_case : List[Any]=0.1 , __snake_case : str=512 , __snake_case : int=16 , __snake_case : Any=2 , __snake_case : Optional[int]=0.02 , __snake_case : Tuple=3 , __snake_case : Any=4 , __snake_case : Dict=None , ):
'''simple docstring'''
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Tuple = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : Tuple = use_input_mask
UpperCAmelCase_ : Tuple = use_token_type_ids
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Any = embedding_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : int = type_sequence_label_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : List[str] = num_choices
UpperCAmelCase_ : Dict = scope
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
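# prepare_config_and_inputs returns the config plus six tensors; every
# create_and_check_* helper below unpacks this 7-tuple in the same order.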
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = MegatronBertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase_ : Tuple = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
UpperCAmelCase_ : List[str] = model(_lowercase , token_type_ids=_lowercase )
UpperCAmelCase_ : List[Any] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = MegatronBertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Dict = MegatronBertForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = MegatronBertForNextSentencePrediction(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase_ : Dict = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self : int , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = MegatronBertForPreTraining(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , next_sentence_label=_lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self : Tuple , __snake_case : List[str] , __snake_case : int , __snake_case : Tuple , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = MegatronBertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase_ : List[str] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Any , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.num_labels
UpperCAmelCase_ : Optional[int] = MegatronBertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase_ : str = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : List[str] = MegatronBertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase_ : Optional[int] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : str , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Any , __snake_case : List[str] , __snake_case : Tuple , __snake_case : int , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.num_choices
UpperCAmelCase_ : Tuple = MegatronBertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase_ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Tuple = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ,
) : Tuple = config_and_inputs
UpperCAmelCase_ : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Any = True
# test_resize_embeddings = False
A_ : List[str] = False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Dict=False ):
'''simple docstring'''
UpperCAmelCase_ : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class in get_values(_lowercase ):
UpperCAmelCase_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
UpperCAmelCase_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = MegatronBertModelTester(self )
UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowercase )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowercase )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowercase )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowercase )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowercase )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowercase )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowercase )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowercase )
def snake_case_ ( __lowercase ):
return torch.tensor(
__lowercase , dtype=torch.long , device=__lowercase , )
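# Wraps a plain list of token ids in a long tensor on the target device, matching
# the hard-coded input used by the integration test below.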
__UpperCamelCase : Any = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip('''Model is not available.''' )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
UpperCAmelCase_ : Optional[Any] = os.path.join(os.environ['''MYDIR'''] , _lowercase )
UpperCAmelCase_ : List[str] = MegatronBertModel.from_pretrained(_lowercase )
model.to(_lowercase )
model.half()
UpperCAmelCase_ : Tuple = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(_lowercase )[0]
UpperCAmelCase_ : int = torch.Size((1, 9, 1_024) )
self.assertEqual(output.shape , _lowercase )
UpperCAmelCase_ : Union[str, Any] = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3 ):
for jj in range(3 ):
UpperCAmelCase_ : int = output[0, ii, jj]
UpperCAmelCase_ : Any = expected[3 * ii + jj]
UpperCAmelCase_ : int = 'ii={} jj={} a={} b={}'.format(_lowercase , _lowercase , _lowercase , _lowercase )
self.assertTrue(math.isclose(_lowercase , _lowercase , rel_tol=_lowercase , abs_tol=_lowercase ) , msg=_lowercase )
| 707
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : torch.Tensor # [batch_size x 3]
A_ : int
A_ : int
A_ : float
A_ : float
A_ : Tuple[int]
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = torch.arange(self.height * self.width )
UpperCAmelCase_ : Any = torch.stack(
[
pixel_indices % self.width,
torch.div(__snake_case , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
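# The grid above is row-major: column 0 holds x = index % width and column 1 holds
# y = index // width, giving an (height * width, 2) integer tensor.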
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ : List[Any] = self.shape
UpperCAmelCase_ : Optional[Any] = int(np.prod(__snake_case ) )
UpperCAmelCase_ : str = self.get_image_coords()
UpperCAmelCase_ : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Tuple = self.get_camera_rays(__snake_case )
UpperCAmelCase_ : Union[str, Any] = rays.view(__snake_case , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def _lowerCamelCase ( self : Dict , __snake_case : torch.Tensor ):
'''simple docstring'''
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : int = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : str = coords.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = self.resolution()
UpperCAmelCase_ : Optional[Any] = self.fov()
UpperCAmelCase_ : int = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[Any] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Optional[Any] = fracs.view(__snake_case , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(__snake_case , 1 , 3 )
+ self.x.view(__snake_case , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__snake_case , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=__snake_case )
UpperCAmelCase_ : Optional[int] = torch.stack(
[
torch.broadcast_to(self.origin.view(__snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__snake_case , *__snake_case , 2 , 3 )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
UpperCAmelCase_ : Tuple = np.array([np.sin(__lowercase ), np.cos(__lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : str = -z * 4
UpperCAmelCase_ : List[Any] = np.array([np.cos(__lowercase ), -np.sin(__lowercase ), 0.0] )
UpperCAmelCase_ : Tuple = np.cross(__lowercase , __lowercase )
origins.append(__lowercase )
xs.append(__lowercase )
ys.append(__lowercase )
zs.append(__lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , width=__lowercase , height=__lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowercase )) , )
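# A sketch of the factory's intent: 20 cameras are placed on a circle of radius 4,
# each oriented toward the origin, sharing a 0.7 rad field of view per axis.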
| 641
| 0
|
def snake_case_ ( __lowercase , __lowercase , __lowercase = 0 , __lowercase = 0 ):
UpperCAmelCase_ : List[Any] = right or len(__A ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__A , __A , left + 1 , right - 1 )
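# This is a double-ended linear search rather than a binary search: each recursive
# call checks one element from either end. Illustrative call: searching for 4 in
# [1, 2, 4, 8] returns index 2 on the second call.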
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCamelCase : Dict = logging.getLogger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = 'token-classification'
def __init__( self : Any , __snake_case : Optional[Any] ):
'''simple docstring'''
if type(__snake_case ) == dict:
UpperCAmelCase_ : Tuple = Namespace(**__snake_case )
UpperCAmelCase_ : Dict = import_module('''tasks''' )
try:
UpperCAmelCase_ : int = getattr(__snake_case , hparams.task_type )
UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCAmelCase_ : int = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase_ : Dict = CrossEntropyLoss().ignore_index
super().__init__(__snake_case , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Optional[int] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return self.model(**__snake_case )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase_ : str = self(**__snake_case )
UpperCAmelCase_ : Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase_ : Optional[Any] = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Any = torch.load(__snake_case )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCAmelCase_ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __snake_case )
UpperCAmelCase_ : List[str] = self.token_classification_task.convert_examples_to_features(
__snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__snake_case , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(__snake_case , __snake_case )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int , __snake_case : bool = False ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._feature_file(__snake_case )
logger.info('''Loading features from cached file %s''' , __snake_case )
UpperCAmelCase_ : Optional[int] = torch.load(__snake_case )
UpperCAmelCase_ : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase_ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase_ : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCAmelCase_ : int = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case )
def _lowerCamelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
"""Compute validation""" ""
UpperCAmelCase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase_ : Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase_ : int = self(**__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs[:2]
UpperCAmelCase_ : Optional[int] = logits.detach().cpu().numpy()
UpperCAmelCase_ : List[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCAmelCase_ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Any = np.argmax(__snake_case , axis=2 )
UpperCAmelCase_ : int = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCAmelCase_ : Tuple = dict(enumerate(self.labels ) )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase_ : Union[str, Any] = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__snake_case , __snake_case ),
'''precision''': precision_score(__snake_case , __snake_case ),
'''recall''': recall_score(__snake_case , __snake_case ),
'''f1''': fa_score(__snake_case , __snake_case ),
}
UpperCAmelCase_ : str = dict(results.items() )
UpperCAmelCase_ : List[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : List[str] , __snake_case : int ):
'''simple docstring'''
# when stable
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._eval_end(__snake_case )
UpperCAmelCase_ : int = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._eval_end(__snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase_ : Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
# Add NER specific options
BaseTransformer.add_model_specific_args(__snake_case , __snake_case )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__snake_case , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__snake_case , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__snake_case , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : Optional[Any] = NERTransformer(args)
__UpperCamelCase : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
__UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
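# Example invocation (arguments are illustrative; generic flags come from
# add_generic_args and the NER-specific ones from add_model_specific_args above):
# python run_ner.py --data_dir ./conll2003 --model_name_or_path bert-base-cased --output_dir ./out --do_predict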
| 641
| 0
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Tuple=0 ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = np.random.RandomState(_lowerCAmelCase )
UpperCAmelCase_ : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
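# The fixed RandomState seed above keeps the tiny ONNX pipeline deterministic,
# which is what the hard-coded expected slices in the tests below rely on.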
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : str = self.get_dummy_inputs()
UpperCAmelCase_ : str = pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Optional[int] = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : Optional[int] = self.get_dummy_inputs()
UpperCAmelCase_ : int = pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Tuple = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs()
UpperCAmelCase_ : Union[str, Any] = pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : Dict = self.get_dummy_inputs()
UpperCAmelCase_ : Union[str, Any] = pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : int = self.get_dummy_inputs()
UpperCAmelCase_ : int = pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : Optional[int] = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase_ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs()
UpperCAmelCase_ : Tuple = pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_ : List[str] = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : List[Any] = self.get_dummy_inputs()
UpperCAmelCase_ : str = 3 * [inputs['''prompt''']]
# forward
UpperCAmelCase_ : Tuple = pipe(**_lowerCAmelCase )
UpperCAmelCase_ : List[str] = output.images[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs()
UpperCAmelCase_ : str = 3 * [inputs.pop('''prompt''' )]
UpperCAmelCase_ : int = pipe.tokenizer(
_lowerCAmelCase , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors='''np''' , )
UpperCAmelCase_ : List[str] = text_inputs['''input_ids''']
UpperCAmelCase_ : Dict = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
UpperCAmelCase_ : Union[str, Any] = prompt_embeds
# forward
UpperCAmelCase_ : Union[str, Any] = pipe(**_lowerCAmelCase )
UpperCAmelCase_ : Optional[int] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : int = self.get_dummy_inputs()
UpperCAmelCase_ : Tuple = 3 * ['''this is a negative prompt''']
UpperCAmelCase_ : int = negative_prompt
UpperCAmelCase_ : Union[str, Any] = 3 * [inputs['''prompt''']]
# forward
UpperCAmelCase_ : Optional[Any] = pipe(**_lowerCAmelCase )
UpperCAmelCase_ : Dict = output.images[0, -3:, -3:, -1]
UpperCAmelCase_ : str = self.get_dummy_inputs()
UpperCAmelCase_ : str = 3 * [inputs.pop('''prompt''' )]
UpperCAmelCase_ : Any = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ : Optional[int] = pipe.tokenizer(
_lowerCAmelCase , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors='''np''' , )
UpperCAmelCase_ : Union[str, Any] = text_inputs['''input_ids''']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = embeds
# forward
UpperCAmelCase_ : str = pipe(**_lowerCAmelCase )
UpperCAmelCase_ : List[str] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = ort.SessionOptions()
UpperCAmelCase_ : Optional[int] = False
return options
def _lowerCamelCase ( self : int ):
'''simple docstring'''
# using the PNDM scheduler by default
UpperCAmelCase_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : Optional[int] = '''A painting of a squirrel eating a burger'''
np.random.seed(0 )
UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='''np''' )
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ : List[str] = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
UpperCAmelCase_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : List[str] = '''open neural network exchange'''
UpperCAmelCase_ : Any = np.random.RandomState(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type='''np''' )
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ : Any = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
UpperCAmelCase_ : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : str = '''open neural network exchange'''
UpperCAmelCase_ : Optional[Any] = np.random.RandomState(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type='''np''' )
UpperCAmelCase_ : int = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ : Any = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = 0
def test_callback_fn(__snake_case : Tuple , __snake_case : Any , __snake_case : Optional[Any] ) -> None:
UpperCAmelCase_ : Optional[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ : Tuple = latents[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ : Tuple = latents[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : int = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ : Optional[int] = '''Andromeda galaxy in a bottle'''
UpperCAmelCase_ : Union[str, Any] = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
UpperCAmelCase_ : Union[str, Any] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : Optional[int] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
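# A minimal, de-obfuscated sketch of the prompt-embedding path the tests
# above exercise: encode the prompt once with the ONNX text encoder, then
# pass the embeddings instead of a string prompt. This assumes the same
# diffusers API the tests rely on; the checkpoint id is illustrative only.
import numpy as np
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
    provider="CPUExecutionProvider",
)
text_inputs = pipe.tokenizer(
    "a photo of an astronaut",
    padding="max_length",
    max_length=pipe.tokenizer.model_max_length,
    truncation=True,
    return_tensors="np",
)
prompt_embeds = pipe.text_encoder(input_ids=text_inputs.input_ids.astype(np.int32))[0]
image = pipe(prompt_embeds=prompt_embeds, num_inference_steps=2, output_type="np").images[0]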
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'encoder-decoder'
A_ : Optional[int] = True
def __init__( self : Dict , **__snake_case : Union[str, Any] ):
'''simple docstring'''
super().__init__(**__snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCAmelCase_ : int = kwargs.pop('''encoder''' )
UpperCAmelCase_ : List[Any] = encoder_config.pop('''model_type''' )
UpperCAmelCase_ : int = kwargs.pop('''decoder''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Optional[int] = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : List[Any] = True
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : Union[str, Any] ):
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Tuple = self.encoder.to_dict()
UpperCAmelCase_ : Tuple = self.decoder.to_dict()
UpperCAmelCase_ : Tuple = self.__class__.model_type
return output
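# A short usage sketch for the composite config defined above, via the public
# transformers names (the BERT/BERT pairing is illustrative): build the two
# sub-configs, then let the classmethod flip the decoder flags.
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention
assert config.to_dict()["model_type"] == "encoder-decoder"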
import sys
from collections import defaultdict
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = []
def _lowerCamelCase ( self : List[str] , __snake_case : Any ):
'''simple docstring'''
return self.node_position[vertex]
def _lowerCamelCase ( self : Dict , __snake_case : List[str] , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = pos
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : List[Any] ):
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
UpperCAmelCase_ : int = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
UpperCAmelCase_ : List[Any] = 2 * start + 1
else:
UpperCAmelCase_ : Union[str, Any] = 2 * start + 2
if heap[smallest_child] < heap[start]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = heap[smallest_child], positions[smallest_child]
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = (
heap[start],
positions[start],
)
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = temp, tempa
# keep the vertex -> heap-index bookkeeping in sync with the swap above
UpperCAmelCase_ : Any = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , temp )
self.top_to_bottom(heap , smallest_child , size , positions )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Dict = position[index]
while index != 0:
UpperCAmelCase_ : List[Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
UpperCAmelCase_ : Union[str, Any] = heap[parent]
UpperCAmelCase_ : Dict = position[parent]
self.set_position(position[parent] , index )
else:
UpperCAmelCase_ : Dict = val
UpperCAmelCase_ : List[str] = temp
self.set_position(temp , index )
break
UpperCAmelCase_ : List[str] = parent
else:
UpperCAmelCase_ : List[str] = val
UpperCAmelCase_ : Union[str, Any] = temp
self.set_position(temp , 0 )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = len(heap ) // 2 - 1
for i in range(start , -1 , -1 ):
self.top_to_bottom(heap , i , len(heap ) , positions )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : int , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = positions[0]
UpperCAmelCase_ : Tuple = sys.maxsize
self.top_to_bottom(heap , 0 , len(heap ) , positions )
return temp
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = Heap()
UpperCAmelCase_ : Tuple = [0] * len(lowerCamelCase__ )
UpperCAmelCase_ : Optional[int] = [-1] * len(lowerCamelCase__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
UpperCAmelCase_ : Tuple = [] # Heap of Distance of vertices from their neighboring vertex
UpperCAmelCase_ : Dict = []
for vertex in range(len(lowerCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(lowerCamelCase__ )
heap.node_position.append(lowerCamelCase__ )
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : int = distance
heap.heapify(lowerCamelCase__ , lowerCamelCase__ )
for _ in range(1 , len(lowerCamelCase__ ) ):
UpperCAmelCase_ : str = heap.delete_minimum(lowerCamelCase__ , lowerCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
UpperCAmelCase_ : Dict = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(lowerCamelCase__ )]
):
UpperCAmelCase_ : List[str] = distance
heap.bottom_to_top(
lowerCamelCase__ , heap.get_position(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase_ : List[str] = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__UpperCamelCase : int = int(input('Enter number of edges: ').strip())
__UpperCamelCase : Tuple = defaultdict(list)
for _ in range(edges_number):
__UpperCamelCase : Dict = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
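# The demo above calls prisms_algorithm, a name the obfuscation detached from
# the definition (snake_case_). For contrast, a compact heapq-based sketch of
# the same lazy Prim's algorithm over the same adjacency-list format, where
# adjacency_list[u] is a list of [v, weight] pairs:
import heapq

def prim_mst(adjacency_list):
    visited = {0}
    # candidate edges out of the growing tree, ordered by weight
    edges = [(weight, 0, vertex) for vertex, weight in adjacency_list[0]]
    heapq.heapify(edges)
    tree_edges = []
    while edges and len(visited) < len(adjacency_list):
        weight, parent, vertex = heapq.heappop(edges)
        if vertex in visited:
            continue
        visited.add(vertex)
        tree_edges.append((parent, vertex))
        for neighbor, weight in adjacency_list[vertex]:
            if neighbor not in visited:
                heapq.heappush(edges, (weight, vertex, neighbor))
    return tree_edges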
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
UpperCAmelCase_ : int = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase_ : Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
return image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = dct.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
def snake_case_ ( __lowercase , __lowercase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase_ : int = torch.cat((q_bias, torch.zeros_like(__lowercase , requires_grad=__lowercase ), v_bias) )
UpperCAmelCase_ : List[str] = qkv_bias
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4
UpperCAmelCase_ : Any = BlipaVisionConfig(image_size=__lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# flan-T5 checkpoints don't appear to ship with bos_token_id set, so it is set explicitly below
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : List[Any] = BlipaConfig(vision_config=__lowercase , text_config=__lowercase )
return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
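# Stripped to its essence, the conversion above is a mechanical state-dict
# rename; a tiny standalone sketch of that pattern (keys are illustrative):
def rename_state_dict(state_dict, rename_pairs):
    renamed = dict(state_dict)
    for src, dest in rename_pairs:
        if src in renamed:
            renamed[dest] = renamed.pop(src)
    return renamed

sd = rename_state_dict(
    {"visual_encoder.cls_token": 0},
    [("visual_encoder.cls_token", "vision_model.embeddings.class_embedding")],
)
assert "vision_model.embeddings.class_embedding" in sd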
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : int = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : Tuple , **__snake_case : Optional[int] ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
UpperCAmelCase_ : Tuple = deprecated_arg[3:]
UpperCAmelCase_ : Tuple = not kwargs.pop(__A )
logger.warning(
f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
UpperCAmelCase_ : Dict = kwargs.pop('''tpu_name''' , self.tpu_name )
UpperCAmelCase_ : List[Any] = kwargs.pop('''device_idx''' , self.device_idx )
UpperCAmelCase_ : Optional[int] = kwargs.pop('''eager_mode''' , self.eager_mode )
UpperCAmelCase_ : Optional[Any] = kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**__A )
A_ : List[Any] = field(
default=snake_case__ , metadata={'help': 'Name of TPU'} , )
A_ : Optional[Any] = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
A_ : str = field(default=snake_case__ , metadata={'help': 'Benchmark models in eager mode.'} )
A_ : Optional[Any] = field(
default=snake_case__ , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`.'
} , )
@cached_property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
UpperCAmelCase_ : Any = None
if self.tpu:
try:
if self.tpu_name:
UpperCAmelCase_ : Tuple = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
UpperCAmelCase_ : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
UpperCAmelCase_ : Tuple = None
return tpu
@cached_property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
UpperCAmelCase_ : int = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently multi-GPU is not supported
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
UpperCAmelCase_ : Tuple = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
UpperCAmelCase_ : Optional[int] = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
return strategy
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.n_gpu > 0
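# A compact sketch of the device-selection logic the strategy property above
# encodes (TPU branch omitted; single GPU only, mirroring the TODO above):
import tensorflow as tf

def pick_strategy(device_idx: int = 0) -> tf.distribute.Strategy:
    gpus = tf.config.list_physical_devices("GPU")
    if gpus:
        tf.config.set_visible_devices(gpus[device_idx], "GPU")
        return tf.distribute.OneDeviceStrategy(device=f"/gpu:{device_idx}")
    tf.config.set_visible_devices([], "GPU")  # fall back to CPU
    return tf.distribute.OneDeviceStrategy(device=f"/cpu:{device_idx}")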
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def snake_case_ ( ):
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
pass
class lowerCAmelCase__( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class lowerCAmelCase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
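# A tiny usage sketch of the signal-based time_limit pattern defined above
# (POSIX main thread only; SIGALRM does not exist on Windows):
import signal
from contextlib import contextmanager

class Timeout(Exception):
    pass

@contextmanager
def time_limit(seconds: float):
    def handler(signum, frame):
        raise Timeout
    signal.signal(signal.SIGALRM, handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)

try:
    with time_limit(0.1):
        while True:  # a runaway "untrusted" program
            pass
except Timeout:
    print("timed out")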
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : str = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase__( UpperCamelCase_ ):
'''simple docstring'''
A_ : Union[str, Any] = """data2vec-audio"""
def __init__( self : str , __snake_case : Optional[int]=32 , __snake_case : Tuple=768 , __snake_case : Optional[int]=12 , __snake_case : Dict=12 , __snake_case : List[Any]=3_072 , __snake_case : List[str]="gelu" , __snake_case : Optional[int]=0.1 , __snake_case : Dict=0.1 , __snake_case : str=0.1 , __snake_case : List[str]=0.0 , __snake_case : int=0.1 , __snake_case : Optional[Any]=0.1 , __snake_case : Optional[Any]=0.02 , __snake_case : int=1E-5 , __snake_case : List[Any]="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , __snake_case : Tuple=False , __snake_case : Any=16 , __snake_case : Optional[int]=19 , __snake_case : Optional[int]=5 , __snake_case : List[str]=0.05 , __snake_case : Union[str, Any]=10 , __snake_case : Union[str, Any]=2 , __snake_case : List[str]=0.0 , __snake_case : Union[str, Any]=10 , __snake_case : List[Any]=0 , __snake_case : Tuple="sum" , __snake_case : str=False , __snake_case : Any=False , __snake_case : List[str]=256 , __snake_case : List[Any]=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Dict=(1, 2, 3, 1, 1) , __snake_case : Tuple=512 , __snake_case : Tuple=0 , __snake_case : Any=1 , __snake_case : Union[str, Any]=2 , __snake_case : Optional[int]=False , __snake_case : Union[str, Any]=3 , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : Tuple=None , **__snake_case : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Any = feat_extract_activation
UpperCAmelCase_ : Dict = list(_a )
UpperCAmelCase_ : int = list(_a )
UpperCAmelCase_ : str = list(_a )
UpperCAmelCase_ : str = conv_bias
UpperCAmelCase_ : Optional[int] = num_conv_pos_embeddings
UpperCAmelCase_ : List[str] = num_conv_pos_embedding_groups
UpperCAmelCase_ : List[str] = conv_pos_kernel_size
UpperCAmelCase_ : Tuple = len(self.conv_dim )
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Tuple = hidden_dropout
UpperCAmelCase_ : Optional[Any] = attention_dropout
UpperCAmelCase_ : Union[str, Any] = activation_dropout
UpperCAmelCase_ : int = feat_proj_dropout
UpperCAmelCase_ : List[Any] = final_dropout
UpperCAmelCase_ : Tuple = layerdrop
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = mask_time_prob
UpperCAmelCase_ : Any = mask_time_length
UpperCAmelCase_ : List[str] = mask_time_min_masks
UpperCAmelCase_ : List[Any] = mask_feature_prob
UpperCAmelCase_ : Tuple = mask_feature_length
UpperCAmelCase_ : Union[str, Any] = mask_feature_min_masks
# ctc loss
UpperCAmelCase_ : Dict = ctc_loss_reduction
UpperCAmelCase_ : List[Any] = ctc_zero_infinity
# adapter
UpperCAmelCase_ : int = add_adapter
UpperCAmelCase_ : List[str] = adapter_kernel_size
UpperCAmelCase_ : List[Any] = adapter_stride
UpperCAmelCase_ : Union[str, Any] = num_adapter_layers
UpperCAmelCase_ : Dict = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Tuple = list(_a )
UpperCAmelCase_ : Optional[Any] = list(_a )
UpperCAmelCase_ : Optional[Any] = list(_a )
UpperCAmelCase_ : Dict = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return math.prod(self.conv_stride )
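# Sanity-check sketch for the property above: with the default conv strides
# (5, 2, 2, 2, 2, 2, 2) each encoder frame covers math.prod(...) = 320 input
# samples, i.e. 20 ms of 16 kHz audio per frame.
import math

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = math.prod(conv_stride)
assert ratio == 320
print(f"{ratio / 16_000 * 1_000:.0f} ms per frame at 16 kHz")  # 20 ms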
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : int = 'falcon'
A_ : int = ['past_key_values']
def __init__( self : Optional[Any] , __snake_case : Tuple=65_024 , __snake_case : List[str]=4_544 , __snake_case : Optional[Any]=32 , __snake_case : Any=71 , __snake_case : str=1E-5 , __snake_case : List[str]=0.02 , __snake_case : List[Any]=True , __snake_case : Dict=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=None , __snake_case : List[Any]=False , __snake_case : Dict=False , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=False , __snake_case : Dict=11 , __snake_case : List[str]=11 , **__snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : int = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''n_embed''' , __snake_case )
UpperCAmelCase_ : str = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Tuple = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Optional[int] = alibi
UpperCAmelCase_ : Dict = new_decoder_architecture
UpperCAmelCase_ : List[Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : Tuple = parallel_attn
UpperCAmelCase_ : List[Any] = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return not self.alibi
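# A quick check of the head_dim property above with the Falcon-7B-style
# defaults of this config (hidden_size=4544, num_attention_heads=71):
hidden_size, num_attention_heads = 4544, 71
assert hidden_size % num_attention_heads == 0
print(hidden_size // num_attention_heads)  # 64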
'''simple docstring'''
import torch
def snake_case_ ( ):
if torch.cuda.is_available():
UpperCAmelCase_ : List[str] = torch.cuda.device_count()
else:
UpperCAmelCase_ : List[Any] = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
def snake_case_ ( __lowercase ):
return " ".join(
''''''.join(word[::-1] ) if len(__lowercase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
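# The __main__ block above calls reverse_long_words, a name the obfuscation
# detached from the definition (snake_case_). A self-contained version: words
# strictly longer than four characters are reversed, shorter words pass
# through unchanged.
def reverse_long_words(sentence: str) -> str:
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())

assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"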
'''simple docstring'''
import argparse
from collections import defaultdict
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Tuple = F'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(lowerCAmelCase__ , '''r''' ) as f:
UpperCAmelCase_ : Union[str, Any] = f.readlines()
UpperCAmelCase_ : Optional[Any] = F'''class {class_name}('''
UpperCAmelCase_ : int = F'''{4 * ' '}def {test_name}('''
UpperCAmelCase_ : List[Any] = F'''{8 * ' '}{correct_line.split()[0]}'''
UpperCAmelCase_ : Tuple = F'''{1_6 * ' '}{correct_line.split()[0]}'''
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Any = []
for line in lines:
if line.startswith(lowerCAmelCase__ ):
UpperCAmelCase_ : Any = True
elif in_class and line.startswith(lowerCAmelCase__ ):
UpperCAmelCase_ : Optional[int] = True
elif in_class and in_func and (line.startswith(lowerCAmelCase__ ) or line.startswith(lowerCAmelCase__ )):
UpperCAmelCase_ : int = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase_ : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase_ : int = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F'''{spaces * ' '}{correct_line}''' )
UpperCAmelCase_ : Optional[int] = False
else:
new_lines.append(lowerCAmelCase__ )
with open(lowerCAmelCase__ , '''w''' ) as f:
for line in new_lines:
f.write(lowerCAmelCase__ )
def snake_case_ ( __lowercase , __lowercase=None ):
if fail is not None:
with open(lowerCAmelCase__ , '''r''' ) as f:
UpperCAmelCase_ : List[Any] = {l.strip() for l in f.readlines()}
else:
UpperCAmelCase_ : Tuple = None
with open(lowerCAmelCase__ , '''r''' ) as f:
UpperCAmelCase_ : str = f.readlines()
UpperCAmelCase_ : Tuple = defaultdict(lowerCAmelCase__ )
for line in correct_lines:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = line.split(''';''' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
__UpperCamelCase : int = parser.parse_args()
main(args.correct_filename, args.fail_filename)
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def snake_case_ ( __lowercase , __lowercase=False ):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
UpperCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowercase ):
UpperCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
__lowercase , batched=__lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
UpperCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowercase ):
if use_longest:
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__lowercase , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1_6 )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = Accelerator(dispatch_batches=__lowercase , split_batches=__lowercase )
UpperCAmelCase_ : int = get_dataloader(__lowercase , not dispatch_batches )
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowercase , __lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = batch.values()
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : Any = [], []
for logit, targ in logits_and_targets:
logits.append(__lowercase )
targs.append(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.cat(__lowercase ), torch.cat(__lowercase )
return logits, targs
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=False , __lowercase=False , __lowercase=1_6 ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = get_basic_setup(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = generate_predictions(__lowercase , __lowercase , __lowercase )
assert (
len(__lowercase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowercase )}'''
def snake_case_ ( __lowercase = False , __lowercase = False ):
UpperCAmelCase_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_mrpc_setup(__lowercase , __lowercase )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup['''no''']
model.to(__lowercase )
model.eval()
for batch in dataloader:
batch.to(__lowercase )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**__lowercase )
UpperCAmelCase_ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__lowercase , references=batch['''labels'''] )
UpperCAmelCase_ : Optional[int] = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Optional[int] = batch['''labels''']
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__lowercase , references=__lowercase )
UpperCAmelCase_ : Dict = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def snake_case_ ( ):
UpperCAmelCase_ : str = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__lowercase , __lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Optional[Any] = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__lowercase , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
UpperCAmelCase_ : List[Any] = Accelerator()
test_torch_metrics(__lowercase , 5_1_2 )
accelerator.state._reset_state()
def snake_case_ ( __lowercase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
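# The pattern the tests above exercise, in its minimal form (a sketch of the
# standard accelerate API; run under `accelerate launch` for multiple
# processes). gather_for_metrics drops the duplicate samples that sharding
# pads onto the last batch, so metric inputs match the dataset length.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
preds = torch.arange(4, device=accelerator.device)
targets = torch.arange(4, device=accelerator.device)
all_preds, all_targets = accelerator.gather_for_metrics((preds, targets))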
from __future__ import annotations
__UpperCamelCase : Optional[int] = "Muhammad Umer Farooq"
__UpperCamelCase : Optional[Any] = "MIT"
__UpperCamelCase : Any = "1.0.0"
__UpperCamelCase : Tuple = "Muhammad Umer Farooq"
__UpperCamelCase : Union[str, Any] = "contact@muhammadumerfarooq.me"
__UpperCamelCase : Union[str, Any] = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowerCAmelCase__( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __snake_case : str ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : list[str] = []
UpperCAmelCase_ : Any = domain
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : list[tuple[str, str | None]] ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
UpperCAmelCase_ : List[Any] = parse.urljoin(self.domain , _UpperCamelCase )
self.urls.append(_UpperCamelCase )
def snake_case_ ( __lowercase ):
return ".".join(get_sub_domain_name(__A ).split('''.''' )[-2:] )
def snake_case_ ( __lowercase ):
return parse.urlparse(__A ).netloc
def snake_case_ ( __lowercase = "https://github.com" ):
UpperCAmelCase_ : int = get_domain_name(__A )
# Initialize the parser
UpperCAmelCase_ : Union[str, Any] = Parser(__A )
try:
# Open URL
UpperCAmelCase_ : Optional[int] = requests.get(__A )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
UpperCAmelCase_ : Union[str, Any] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
UpperCAmelCase_ : Any = requests.get(__A )
# Get the valid email.
UpperCAmelCase_ : Optional[int] = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__A )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__A )
if __name__ == "__main__":
__UpperCamelCase : Dict = emails_from_url('https://github.com')
print(F'{len(emails)} emails found:')
print('\n'.join(sorted(emails)))
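# The crawl above ultimately reduces to one regex pass per fetched page; a
# standalone sketch of that extraction step. The pattern mirrors the one used
# above (deliberately narrow, not a general RFC 5322 matcher):
import re

def extract_emails(text: str, domain: str) -> set[str]:
    return set(re.findall(r"[a-zA-Z0-9]+@" + re.escape(domain), text))

assert extract_emails("mail octo@github.com or cat@github.com", "github.com") == {
    "octo@github.com",
    "cat@github.com",
}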
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
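# The lazy-import pattern above in miniature: attributes resolve on first
# access instead of at import time (a sketch built on importlib, not the
# transformers _LazyModule itself):
import importlib

class LazyModule:
    def __init__(self, name_to_module):
        self._name_to_module = name_to_module

    def __getattr__(self, name):
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)

lazy = LazyModule({"sqrt": "math"})
print(lazy.sqrt(9.0))  # the math module is only imported here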
from math import isqrt
def snake_case_ ( __lowercase ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(_lowerCamelCase ) + 1 ) )
def snake_case_ ( __lowercase = 1_0**6 ):
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Optional[Any] = 1
UpperCAmelCase_ : Union[str, Any] = 7
while prime_candidate < max_prime:
primes_count += is_prime(_lowerCamelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
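# The candidate sequence above steps through the gaps between consecutive
# cubes: n**3 - (n - 1)**3 = 3*n*n - 3*n + 1, i.e. 7, 19, 37, 61, ... A short
# check that the incremental update matches the closed form:
candidate, cube_index = 7, 1
for n in range(2, 10):
    assert candidate == n**3 - (n - 1) ** 3
    cube_index += 1
    candidate += 6 * cube_index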
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    '''simple docstring'''

    def __init__(self, data):
        '''simple docstring'''
        self.data = data
        self.h = [0X67_45_23_01, 0Xef_cd_ab_89, 0X98_ba_dc_fe, 0X10_32_54_76, 0Xc3_d2_e1_f0]

    @staticmethod
    def rotate(n, b):
        '''simple docstring'''
        return ((n << b) | (n >> (32 - b))) & 0Xff_ff_ff_ff

    def padding(self):
        '''simple docstring'''
        padding = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack('>Q', 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        '''simple docstring'''
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        '''simple docstring'''
        w = list(struct.unpack('>16L', block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        '''simple docstring'''
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0X5a_82_79_99
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0X6e_d9_eb_a1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0X8f_1b_bc_dc
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0Xca_62_c1_d6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0Xff_ff_ff_ff,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0Xff_ff_ff_ff,
                self.h[1] + b & 0Xff_ff_ff_ff,
                self.h[2] + c & 0Xff_ff_ff_ff,
                self.h[3] + d & 0Xff_ff_ff_ff,
                self.h[4] + e & 0Xff_ff_ff_ff,
            )
        return ('{:08x}' * 5).format(*self.h)
def test_sha1_hash():
    msg = B'Test String'
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description='Process some strings or files')
    parser.add_argument(
        '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string',
    )
    parser.add_argument('--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHAaHash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
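# Minimal usage sketch (illustrative addition): the digest can be cross-checked
# against the standard library, e.g. as a doctest:
# >>> import hashlib
# >>> SHAaHash(b'abc').final_hash() == hashlib.sha1(b'abc').hexdigest()
# True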
| 641
| 0
|
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, '''git_log.json''') , '''w''' ) as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('''Initializing GPUs''' )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['''WORLD_SIZE'''] )
        params.n_gpu_per_node = int(os.environ['''N_GPU_NODE'''] )
        params.global_rank = int(os.environ['''RANK'''] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['''N_NODES'''] )
        assert params.node_id == int(os.environ['''NODE_RANK'''] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = F'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def set_seed(args):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
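# Example (added, hypothetical values): init_gpu_params expects an argparse-style
# namespace carrying at least `n_gpu` and `local_rank`. A CPU-only run short-circuits:
# from argparse import Namespace
# init_gpu_params(Namespace(n_gpu=0, local_rank=-1))  # sets is_master=True, multi_gpu=False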
| 717
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
'''simple docstring'''
    model_type = 'timesformer'
    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
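# Usage sketch (added example; the defaults above mirror a TimeSformer-base setup):
# config = TimesformerConfig(num_frames=16, attention_type='divided_space_time')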
| 641
| 0
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8000,
"sample_size": 13_1072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    '''simple docstring'''

    pass


class DiffusionUncond(nn.Module):
    '''simple docstring'''

    def __init__(self, global_args):
        '''simple docstring'''
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]['''url''']
    os.system(F'''wget {url} ./''' )
    return F'''./{model_name}.ckpt'''
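# Worked note (added for illustration): with sigma = sin(t * pi / 2) ** 2 and
# alpha = (1 - sigma ** 2) ** 0.5 the pair (alpha, sigma) lies on the unit circle,
# so atan2(sigma, alpha) * 2 / pi in alpha_sigma_to_t maps it back to a timestep
# in [0, 1]; get_crash_schedule thus bends a linear t-grid onto the "crash" noise
# schedule the original audio-diffusion checkpoints were trained with.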
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith('''skip''' ):
        return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
    # name has to be of format main.{digit}
    if not name.startswith('''main.''' ):
        raise ValueError(F'''ResConvBlock error with {name}''' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(F'''Attn error with {name}''' )
def rename(input_string, max_depth=13):
    string = input_string
    if string.split('''.''' )[0] == "timestep_embed":
        return string.replace('''timestep_embed''' , '''time_proj''' )
    depth = 0
    if string.startswith('''net.3.''' ):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.''' ):
        string = string[4:]
    while string.startswith('''main.7.''' ):
        depth += 1
        string = string[7:]
    if string.startswith('''main.''' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'''down_blocks.{depth}'''
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - depth - 1}'''
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - 1}''' if int(layer_num) > 3 else '''down_blocks.0'''
    if not string_left.startswith('''.''' ):
        raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + '''.''' + new_layer + '''.''' + string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel''' ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    model_name = args.model_path.split('''/''' )[-1].split('''.''' )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]['''sample_rate''']
    sample_size = MODELS_MAP[model_name]['''sample_size''']
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNetaDModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )['''state_dict'''] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers) == 0, F'''Problem with {renamed_minus_diffusers}'''
    assert all(k.endswith('''kernel''' ) for k in list(diffusers_minus_renamed) ), F'''Problem with {diffusers_minus_renamed}'''
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 1_0_0
    seed = 3_3
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device)
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=scheduler )
    generator = torch.manual_seed(3_3)
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print('''Diff sum''' , diff_sum)
    print('''Diff max''' , diff_max)
    assert diff_max < 1e-3, F'''Diff max: {diff_max} is too much :-/'''
    print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
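# Invocation sketch (added, hypothetical script name; the flags are the ones
# declared in the argparse block above):
# python convert_dance_diffusion.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers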
| 718
|
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_0_0_0 )
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
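    # Reading the counts (added note): qubit 2 (sum) and qubit 3 (carry-out) are
    # measured into the classical register, so quantum_full_adder(1, 1, 1) should
    # put all 1_000 shots on '11' -- 1 + 1 + 1 = 0b11 = 3. A value of 2 for an
    # input applies a Hadamard instead, spreading the shots over several outcomes.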
| 641
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : int = tempfile.mkdtemp()
UpperCAmelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCAmelCase_ : Dict = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
'''do_convert_rgb''': True,
}
UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname , __UpperCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__UpperCamelCase , __UpperCamelCase )
def _lowerCamelCase ( self : Tuple , **__snake_case : Any ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def _lowerCamelCase ( self : str , **__snake_case : str ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def _lowerCamelCase ( self : str , **__snake_case : Any ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase_ : Any = [Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Dict = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCamelCase )
UpperCAmelCase_ : Optional[int] = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCamelCase )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
UpperCAmelCase_ : Optional[int] = self.get_image_processor(do_normalize=__UpperCamelCase )
UpperCAmelCase_ : Any = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=__UpperCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.get_image_processor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
UpperCAmelCase_ : str = self.prepare_image_inputs()
UpperCAmelCase_ : int = image_processor(__UpperCamelCase , return_tensors='''np''' )
UpperCAmelCase_ : List[str] = processor(images=__UpperCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : str = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
UpperCAmelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
UpperCAmelCase_ : Optional[Any] = processor(text=__UpperCamelCase )
UpperCAmelCase_ : Any = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
UpperCAmelCase_ : Optional[int] = '''Alexandra,T-shirt的价格是15便士。'''
UpperCAmelCase_ : Optional[int] = self.prepare_image_inputs()
UpperCAmelCase_ : List[str] = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.get_image_processor()
UpperCAmelCase_ : str = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
UpperCAmelCase_ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : Union[str, Any] = processor.batch_decode(__UpperCamelCase )
UpperCAmelCase_ : List[Any] = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = '''Alexandra,T-shirt的价格是15便士。'''
UpperCAmelCase_ : int = self.prepare_image_inputs()
UpperCAmelCase_ : int = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
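# Note (added commentary): the processor under test simply routes `text=` to the
# wrapped BertTokenizer and `images=` to the ChineseCLIPImageProcessor, which is
# what the matching-output assertions above exercise.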
| 719
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
'''simple docstring'''
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=50_257 , n_positions=1_024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
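# Usage sketch (added example): the attribute_map above aliases the GPT-style
# names, so config.hidden_size == config.n_embd after construction.
# config = GPTBigCodeConfig(n_embd=2_048, n_layer=24, multi_query=True)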
| 641
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename , '''r''' ) as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
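# Example (added): a label file with one class name per line, e.g. 'happy' and
# 'sad', yields {0: 'happy', 1: 'sad'}, which the sequence-classification branch
# of the conversion below stores on the config as id2label.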
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            weight_type = '''param'''
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('''.''' ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('''.''' ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            weight_type = '''param'''
    if weight_type is not None and weight_type != "param":
        full_key = '''.'''.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = '''.'''.join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if '''lm_head''' in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split('''.''' )[-2]
                mapped_key = mapped_key.replace('''*''' , layer_index)
            if "weight_g" in name:
                weight_type = '''weight_g'''
            elif "weight_v" in name:
                weight_type = '''weight_v'''
            elif "bias" in name:
                weight_type = '''bias'''
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = '''weight'''
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
                return is_used
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 0
            vocab_dict['''<s>'''] = 1
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
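# Invocation sketch (added, hypothetical script name and paths; the flags are the
# ones declared in the argparse block above):
# python convert_wav2vec2.py --checkpoint_path wav2vec_small.pt \
#     --pytorch_dump_folder_path ./wav2vec2-base --not_finetuned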
| 720
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **calc_rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **calc_rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
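# CLI usage sketch (added, hypothetical file names; fire exposes the function's
# parameters as flags or positionals):
# python rouge_cli.py --pred_path preds.txt --tgt_path refs.txt --save_path metrics.json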
| 641
| 0
|
from __future__ import annotations
def median_of_two_arrays(nums1, nums2):
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_2 = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(F'The median of two arrays is: {median_of_two_arrays(array_1, array_2)}')
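    # Worked example (added): median_of_two_arrays([1, 3], [2]) sorts to [1, 2, 3];
    # an odd total length returns the middle element 2, while an even total length
    # averages the two middle values, e.g. median_of_two_arrays([1, 2], [3, 4]) == 2.5.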
| 721
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
'''simple docstring'''
def __init__( self : int , __snake_case : List[Any] , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[str]=False , __snake_case : List[str]=False , __snake_case : Tuple=False , __snake_case : List[str]=2 , __snake_case : Optional[int]=99 , __snake_case : Tuple=0 , __snake_case : int=32 , __snake_case : Optional[int]=5 , __snake_case : str=4 , __snake_case : str=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=512 , __snake_case : Tuple=2 , __snake_case : List[Any]=0.02 , __snake_case : Any=2 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]="last" , __snake_case : Dict=True , __snake_case : Any=None , __snake_case : str=0 , ):
'''simple docstring'''
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Union[str, Any] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_lengths
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : int = gelu_activation
UpperCAmelCase_ : str = sinusoidal_embeddings
UpperCAmelCase_ : List[str] = causal
UpperCAmelCase_ : Tuple = asm
UpperCAmelCase_ : List[Any] = n_langs
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = n_special
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Tuple = num_labels
UpperCAmelCase_ : List[Any] = num_choices
UpperCAmelCase_ : Any = summary_type
UpperCAmelCase_ : Optional[int] = use_proj
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_input_lengths:
UpperCAmelCase_ : List[str] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , langs=__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : Any = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Optional[int] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
UpperCAmelCase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : List[str] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
UpperCAmelCase_ : Optional[Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCAmelCase_ : Optional[int] = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case )
((UpperCAmelCase_) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCamelCase ( self : str , __snake_case : Dict , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Any , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(__snake_case )
UpperCAmelCase_ : Optional[int] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[Any] , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase_ : int = self.num_choices
UpperCAmelCase_ : int = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self : str , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : str=False ):
'''simple docstring'''
UpperCAmelCase_ : int = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCAmelCase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
UpperCAmelCase_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : str , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : Dict = min_length + idx + 1
UpperCAmelCase_ : List[Any] = min_length + idx + 1
UpperCAmelCase_ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) )
def _lowerCamelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Optional[Any]=False , __snake_case : str=1 ):
'''simple docstring'''
self.assertIsInstance(__snake_case , __snake_case )
self.assertListEqual(
[isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , )
self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
UpperCAmelCase_ : str = min_length + idx + 1
UpperCAmelCase_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , )
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__snake_case )
UpperCAmelCase_ : str = torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president
UpperCAmelCase_ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCAmelCase_ : Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
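
# A minimal, self-contained sketch of the greedy-generation check performed above,
# assuming `torch` and `transformers` are installed and the public
# `xlm-mlm-en-2048` checkpoint (the one named in the test) is reachable.
import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
model.eval()
input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
with torch.no_grad():
    output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
print(output_ids[0].tolist())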
| 641
| 0
|
import copy
import re
class lowerCAmelCase__:
'''simple docstring'''
A_ : Tuple = '''hp'''
A_ : Optional[Any] = {}
A_ : Optional[Any] = None
@classmethod
def _lowerCamelCase ( cls : List[str] , __snake_case : Union[str, Any] , __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : int = prefix
UpperCAmelCase_ : List[Any] = defaults
cls.build_naming_info()
@staticmethod
def _lowerCamelCase ( __snake_case : Optional[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
if len(A_ ) == 0:
return ""
UpperCAmelCase_ : Union[str, Any] = None
if any(char.isdigit() for char in word ):
raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(A_ ) + 1 ):
UpperCAmelCase_ : List[Any] = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ : str = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__snake_case : int ):
UpperCAmelCase_ : Tuple = ''''''
while integer != 0:
UpperCAmelCase_ : Dict = chr(ord('''A''' ) + integer % 10 ) + s
integer //= 10
return s
UpperCAmelCase_ : List[str] = 0
while True:
UpperCAmelCase_ : Optional[int] = word + '''#''' + int_to_alphabetic(A_ )
if sword in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ : Union[str, Any] = sword
break
UpperCAmelCase_ : Any = short_word
UpperCAmelCase_ : List[str] = word
return short_word
@staticmethod
def _lowerCamelCase ( __snake_case : Any , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = param_name.split('''_''' )
UpperCAmelCase_ : Any = [TrialShortNamer.shortname_for_word(A_ , A_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
UpperCAmelCase_ : int = ['''''', '''_''']
for separator in separators:
UpperCAmelCase_ : Optional[int] = separator.join(A_ )
if shortname not in info["reverse_short_param"]:
UpperCAmelCase_ : int = shortname
UpperCAmelCase_ : List[Any] = param_name
return shortname
return param_name
@staticmethod
def _lowerCamelCase ( __snake_case : List[Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = TrialShortNamer.shortname_for_key(A_ , A_ )
UpperCAmelCase_ : Any = short_name
UpperCAmelCase_ : int = param_name
@classmethod
def _lowerCamelCase ( cls : Any ):
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
UpperCAmelCase_ : Union[str, Any] = {
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
UpperCAmelCase_ : List[str] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(A_ , A_ )
UpperCAmelCase_ : Optional[Any] = info
@classmethod
def _lowerCamelCase ( cls : Any , __snake_case : List[str] ):
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
UpperCAmelCase_ : Tuple = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
UpperCAmelCase_ : str = cls.NAMING_INFO['''short_param'''][k]
if isinstance(A_ , A_ ):
UpperCAmelCase_ : Optional[int] = 1 if v else 0
UpperCAmelCase_ : Dict = '''''' if isinstance(A_ , (int, float) ) else '''-'''
UpperCAmelCase_ : Dict = f'''{key}{sep}{v}'''
name.append(A_ )
return "_".join(A_ )
@classmethod
def _lowerCamelCase ( cls : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
UpperCAmelCase_ : Tuple = []
else:
UpperCAmelCase_ : Union[str, Any] = repr.split('''_''' )
UpperCAmelCase_ : Tuple = {}
for value in values:
if "-" in value:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = value.split('''-''' )
else:
UpperCAmelCase_ : Any = re.sub('''[0-9.]''' , '''''' , A_ )
UpperCAmelCase_ : Dict = float(re.sub('''[^0-9.]''' , '''''' , A_ ) )
UpperCAmelCase_ : Optional[Any] = cls.NAMING_INFO['''reverse_short_param'''][p_k]
UpperCAmelCase_ : Dict = p_v
for k in cls.DEFAULTS:
if k not in parameters:
UpperCAmelCase_ : str = cls.DEFAULTS[k]
return parameters
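
# The class above is hard to follow with the mangled identifiers, so here is a tiny
# standalone sketch of its core idea: greedily assign each word the shortest prefix
# not already taken as its abbreviation. The helper name and sample words are
# illustrative only.
def build_short_words(words):
    short, used = {}, set()
    for word in words:
        for n in range(1, len(word) + 1):
            prefix = word[:n]
            if prefix not in used:
                short[word] = prefix
                used.add(prefix)
                break
    return short

print(build_short_words(["learning", "rate", "layers"]))  # {'learning': 'l', 'rate': 'r', 'layers': 'la'}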
| 700
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(snake_case__ )
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Union[str, Any] = 'rag'
A_ : Tuple = True
def __init__( self : int , __snake_case : List[str]=None , __snake_case : List[Any]=True , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Any=None , __snake_case : Optional[int]=None , __snake_case : Optional[int]=" / " , __snake_case : Any=" // " , __snake_case : Tuple=5 , __snake_case : Union[str, Any]=300 , __snake_case : Any=768 , __snake_case : Tuple=8 , __snake_case : int="wiki_dpr" , __snake_case : Optional[int]="train" , __snake_case : Tuple="compressed" , __snake_case : Optional[int]=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=False , __snake_case : str=False , __snake_case : Dict=0.0 , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : str=False , __snake_case : Optional[Any]=True , __snake_case : int=None , **__snake_case : str , ):
'''simple docstring'''
super().__init__(
bos_token_id=__snake_case , pad_token_id=__snake_case , eos_token_id=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , is_encoder_decoder=__snake_case , prefix=__snake_case , vocab_size=__snake_case , **__snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase_ : Dict = kwargs.pop('''question_encoder''' )
UpperCAmelCase_ : List[Any] = question_encoder_config.pop('''model_type''' )
UpperCAmelCase_ : Any = kwargs.pop('''generator''' )
UpperCAmelCase_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ : Any = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : int = AutoConfig.for_model(__snake_case , **__snake_case )
UpperCAmelCase_ : Any = reduce_loss
UpperCAmelCase_ : List[Any] = label_smoothing
UpperCAmelCase_ : Tuple = exclude_bos_score
UpperCAmelCase_ : int = do_marginalize
UpperCAmelCase_ : Tuple = title_sep
UpperCAmelCase_ : Union[str, Any] = doc_sep
UpperCAmelCase_ : Any = n_docs
UpperCAmelCase_ : Optional[int] = max_combined_length
UpperCAmelCase_ : Any = dataset
UpperCAmelCase_ : List[Any] = dataset_split
UpperCAmelCase_ : Union[str, Any] = index_name
UpperCAmelCase_ : List[str] = retrieval_vector_size
UpperCAmelCase_ : Optional[Any] = retrieval_batch_size
UpperCAmelCase_ : Optional[int] = passages_path
UpperCAmelCase_ : Optional[Any] = index_path
UpperCAmelCase_ : List[Any] = use_dummy_dataset
UpperCAmelCase_ : int = output_retrieved
UpperCAmelCase_ : int = do_deduplication
UpperCAmelCase_ : Optional[int] = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase_ : int = getattr(self.generator , '''forced_eos_token_id''' , __snake_case )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : str ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Optional[int] = self.question_encoder.to_dict()
UpperCAmelCase_ : Dict = self.generator.to_dict()
UpperCAmelCase_ : Optional[Any] = self.__class__.model_type
return output
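
# A short usage sketch for the classmethod above; in the public API it is
# `RagConfig.from_question_encoder_generator_configs`. The DPR/BART sub-configs are
# an illustrative pairing, not the only valid one.
from transformers import BartConfig, DPRConfig, RagConfig

rag_config = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig())
print(rag_config.question_encoder.model_type, rag_config.generator.model_type)  # dpr bart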
| 641
| 0
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Any = logging.get_logger()
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : nn.Module
A_ : List[nn.Module] = field(default_factory=lowercase__ )
A_ : list = field(default_factory=lowercase__ )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : List[Any] , __snake_case : int , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = len(list(m.modules() ) ) == 1 or isinstance(__snake_case , nn.Convad ) or isinstance(__snake_case , nn.BatchNormad )
if has_not_submodules:
self.traced.append(__snake_case )
def __call__( self : str , __snake_case : Any ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__snake_case )
[x.remove() for x in self.handles]
return self
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda __snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
A_ : nn.Module
A_ : nn.Module
A_ : int = 1
A_ : List = field(default_factory=lowercase__ )
A_ : List = field(default_factory=lowercase__ )
A_ : bool = True
def __call__( self : Dict , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = Tracker(self.dest )(__snake_case ).parametrized
UpperCAmelCase_ : int = Tracker(self.src )(__snake_case ).parametrized
UpperCAmelCase_ : Optional[int] = list(filter(lambda __snake_case : type(__snake_case ) not in self.src_skip , __snake_case ) )
UpperCAmelCase_ : Tuple = list(filter(lambda __snake_case : type(__snake_case ) not in self.dest_skip , __snake_case ) )
if len(__snake_case ) != len(__snake_case ) and self.raise_if_mismatch:
raise Exception(
f'''Numbers of operations are different. Source module has {len(__snake_case )} operations while'''
f''' destination module has {len(__snake_case )}.''' )
for dest_m, src_m in zip(__snake_case , __snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(f'''Transferred from={src_m} to={dest_m}''' )
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : int = []
# - get the stem
feature_blocks.append(('''conv1''', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('''block''' ), f'''Unexpected layer name {k}'''
UpperCAmelCase_ : int = len(__snake_case ) + 1
feature_blocks.append((f'''res{block_index}''', v) )
UpperCAmelCase_ : Union[str, Any] = nn.ModuleDict(__snake_case )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Dict ):
'''simple docstring'''
return get_trunk_forward_outputs(
__snake_case , out_feat_keys=__snake_case , feature_blocks=self._feature_blocks , )
class lowerCAmelCase__( lowercase__ ):
'''simple docstring'''
def _lowerCamelCase ( self : Optional[int] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = x.split('''-''' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Optional[int] , __snake_case : str ):
'''simple docstring'''
# default to timm!
if x not in self:
UpperCAmelCase_ : Any = self.convert_name_to_timm(__snake_case )
UpperCAmelCase_ : Union[str, Any] = partial(lambda: (timm.create_model(__snake_case , pretrained=__snake_case ).eval(), None) )
else:
UpperCAmelCase_ : Optional[Any] = super().__getitem__(__snake_case )
return val
class lowerCAmelCase__( lowercase__ ):
'''simple docstring'''
def __getitem__( self : Union[str, Any] , __snake_case : List[str] ):
'''simple docstring'''
if "seer" in x and "in1k" not in x:
UpperCAmelCase_ : int = RegNetModel
else:
UpperCAmelCase_ : int = RegNetForImageClassification
return val
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
for from_key, to_key in keys:
UpperCAmelCase_ : Dict = from_state_dict[from_key].clone()
print(F'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = True , ):
print(F'''Converting {name}...''' )
with torch.no_grad():
UpperCAmelCase_ , UpperCAmelCase_ : Any = from_model_func()
UpperCAmelCase_ : Dict = our_model_func(__SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase_ : Tuple = ModuleTransfer(src=__SCREAMING_SNAKE_CASE , dest=__SCREAMING_SNAKE_CASE , raise_if_mismatch=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(__SCREAMING_SNAKE_CASE )
if from_state_dict is not None:
UpperCAmelCase_ : int = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
UpperCAmelCase_ : Any = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
UpperCAmelCase_ : List[Any] = manually_copy_vissl_head(__SCREAMING_SNAKE_CASE , our_model.state_dict() , __SCREAMING_SNAKE_CASE )
our_model.load_state_dict(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = our_model(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = (
our_outputs.logits if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else our_outputs.last_hidden_state
)
UpperCAmelCase_ : Dict = from_model(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = from_output[-1] if type(__SCREAMING_SNAKE_CASE ) is list else from_output
        # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
UpperCAmelCase_ : Tuple = our_outputs.hidden_states[-1]
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : Tuple = 2_2_4 if '''seer''' not in name else 3_8_4
# we can use the convnext one
UpperCAmelCase_ : int = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
print(F'''Pushed {name}''' )
def snake_case_ ( __lowercase , __lowercase = None , __lowercase = True ):
UpperCAmelCase_ : List[Any] = '''imagenet-1k-id2label.json'''
UpperCAmelCase_ : Union[str, Any] = 1_0_0_0
UpperCAmelCase_ : int = (1, num_labels)
UpperCAmelCase_ : int = '''huggingface/label-files'''
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : List[Any] = json.load(open(cached_download(hf_hub_url(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) ) , '''r''' ) )
UpperCAmelCase_ : str = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCAmelCase_ : List[Any] = idalabel
UpperCAmelCase_ : List[Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Tuple = partial(__SCREAMING_SNAKE_CASE , num_labels=__SCREAMING_SNAKE_CASE , idalabel=__SCREAMING_SNAKE_CASE , labelaid=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
UpperCAmelCase_ : str = NameToOurModelFuncMap()
UpperCAmelCase_ : Any = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__lowercase , __lowercase ) -> Tuple[nn.Module, Dict]:
UpperCAmelCase_ : Tuple = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , model_dir=str(__SCREAMING_SNAKE_CASE ) , map_location='''cpu''' )
UpperCAmelCase_ : List[Any] = model_func()
# check if we have a head, if yes add it
UpperCAmelCase_ : Optional[int] = files['''classy_state_dict''']['''base_model''']['''model''']
UpperCAmelCase_ : Any = model_state_dict['''trunk''']
model.load_state_dict(__SCREAMING_SNAKE_CASE )
return model.eval(), model_state_dict["heads"]
# pretrained
UpperCAmelCase_ : List[Any] = partial(
__SCREAMING_SNAKE_CASE , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ : Optional[int] = partial(
__SCREAMING_SNAKE_CASE , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ : Union[str, Any] = partial(
__SCREAMING_SNAKE_CASE , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase_ : Any = partial(
__SCREAMING_SNAKE_CASE , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_a=1_7_4_4 , w_0=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
# IN1K finetuned
UpperCAmelCase_ : int = partial(
__SCREAMING_SNAKE_CASE , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ : Union[str, Any] = partial(
__SCREAMING_SNAKE_CASE , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ : int = partial(
__SCREAMING_SNAKE_CASE , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase_ : Optional[Any] = partial(
__SCREAMING_SNAKE_CASE , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_a=1_7_4_4 , w_0=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
if model_name:
convert_weight_and_push(
__SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
return config, expected_shape
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
        'The name of the model you wish to convert; it must be one of the supported regnet* architectures,'
        ' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
__UpperCamelCase : Dict = parser.parse_args()
__UpperCamelCase : Union[str, Any] = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
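
# A self-contained sketch of the forward-hook tracing trick the `Tracker` class above
# relies on: register a hook on every leaf module, run one forward pass to record the
# modules in execution order, then remove the hooks. The toy network is illustrative.
import torch
import torch.nn as nn

def trace_leaf_modules(model, x):
    traced, handles = [], []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf module: no submodules
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    with torch.no_grad():
        model(x)
    for h in handles:
        h.remove()
    return traced

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
print(trace_leaf_modules(net, torch.randn(1, 3, 8, 8)))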
| 701
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e).split(''' ''' )[:-1])
        full_error_msg = ''''''
        depreciated_args = eval(str(e).split(''' ''' )[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
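
# A minimal sketch of the `HfArgumentParser` pattern used above, with a toy dataclass
# standing in for `TensorFlowBenchmarkArguments`; the field names are illustrative.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ToyArguments:
    batch_size: int = field(default=8, metadata={"help": "Per-device batch size."})
    fp16: bool = field(default=False, metadata={"help": "Whether to use float16."})

toy_parser = HfArgumentParser(ToyArguments)
toy_args = toy_parser.parse_args_into_dataclasses(args=["--batch_size", "16"])[0]
print(toy_args.batch_size, toy_args.fp16)  # 16 False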
| 641
| 0
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = []
for part_id in partition_order:
UpperCAmelCase_ : List[str] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(__lowercase ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
UpperCAmelCase_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase_ : Optional[int] = spark.range(1_0_0 ).repartition(1 )
UpperCAmelCase_ : int = Spark(__lowercase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=1_6 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 5_0
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
UpperCAmelCase_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase_ : Optional[Any] = spark.range(1_0 ).repartition(2 )
UpperCAmelCase_ : Optional[Any] = [1, 0]
UpperCAmelCase_ : Union[str, Any] = _generate_iterable_examples(__lowercase , __lowercase ) # Reverse the partitions.
UpperCAmelCase_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowercase , __lowercase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
UpperCAmelCase_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase_ : int = spark.range(1_0 ).repartition(1 )
UpperCAmelCase_ : Tuple = SparkExamplesIterable(__lowercase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__lowercase ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
UpperCAmelCase_ : Dict = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase_ : List[Any] = spark.range(3_0 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
UpperCAmelCase_ : Union[str, Any] = lambda __lowercase : x.reverse()
UpperCAmelCase_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowercase , [2, 1, 0] )
UpperCAmelCase_ : str = SparkExamplesIterable(__lowercase ).shuffle_data_sources(__lowercase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__lowercase ):
UpperCAmelCase_ , UpperCAmelCase_ : int = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
UpperCAmelCase_ : str = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase_ : str = spark.range(2_0 ).repartition(4 )
# Partitions 0 and 2
UpperCAmelCase_ : Optional[int] = SparkExamplesIterable(__lowercase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCAmelCase_ : Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowercase , [0, 2] )
for i, (row_id, row_dict) in enumerate(__lowercase ):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCAmelCase_ : List[str] = SparkExamplesIterable(__lowercase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCAmelCase_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowercase , [1, 3] )
for i, (row_id, row_dict) in enumerate(__lowercase ):
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case_ ( ):
UpperCAmelCase_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase_ : int = spark.range(1_0_0 ).repartition(1 )
UpperCAmelCase_ : Union[str, Any] = Spark(__lowercase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_0_0
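
# The `shard_data_sources` behaviour asserted above (worker 0 of 2 gets partitions
# [0, 2], worker 1 gets [1, 3]) is plain stride-based sharding; a pure-Python sketch
# of the assignment rule:
def shard_partitions(num_partitions, worker_id, num_workers):
    return list(range(num_partitions))[worker_id::num_workers]

print(shard_partitions(4, 0, 2))  # [0, 2]
print(shard_partitions(4, 1, 2))  # [1, 3]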
| 702
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : List[Any] = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
'''simple docstring'''
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Dict = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(__snake_case )
UpperCAmelCase_ : List[str] = list(__snake_case )
UpperCAmelCase_ : Any = list(__snake_case )
UpperCAmelCase_ : Any = conv_bias
UpperCAmelCase_ : List[str] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : Optional[int] = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[Any] = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : int = num_clusters
UpperCAmelCase_ : int = do_stable_layer_norm
UpperCAmelCase_ : Any = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : int = apply_spec_augment
UpperCAmelCase_ : Optional[Any] = mask_time_prob
UpperCAmelCase_ : str = mask_time_length
UpperCAmelCase_ : Any = mask_time_min_masks
UpperCAmelCase_ : str = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Optional[int] = num_codevectors_per_group
UpperCAmelCase_ : int = num_codevector_groups
UpperCAmelCase_ : List[str] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : List[str] = num_negatives
UpperCAmelCase_ : Any = codevector_dim
UpperCAmelCase_ : Tuple = proj_codevector_dim
UpperCAmelCase_ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Any = ctc_loss_reduction
UpperCAmelCase_ : Optional[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Dict = list(__snake_case )
UpperCAmelCase_ : Union[str, Any] = xvector_output_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
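
# Worked example for the property above (`inputs_to_logits_ratio` upstream): it
# multiplies the convolutional strides together, so with the default
# conv_stride=(5, 2, 2, 2, 2, 2, 2) each output frame covers 320 input samples.
import functools
import operator

print(functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1))  # 320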
| 641
| 0
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCAmelCase__( UpperCamelCase_ ):
'''simple docstring'''
@slow
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
UpperCAmelCase_ : Optional[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
UpperCAmelCase_ : Any = bertabert.config.encoder.vocab_size
UpperCAmelCase_ : List[str] = tokenizer.sep_token_id
UpperCAmelCase_ : Any = tokenizer.cls_token_id
UpperCAmelCase_ : Tuple = 128
UpperCAmelCase_ : str = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
UpperCAmelCase_ : List[str] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
UpperCAmelCase_ : Optional[int] = train_dataset.select(range(32 ) )
UpperCAmelCase_ : Tuple = val_dataset.select(range(16 ) )
UpperCAmelCase_ : List[str] = 4
def _map_to_encoder_decoder_inputs(__snake_case : Optional[int] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCAmelCase_ : Dict = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__A , max_length=512 )
UpperCAmelCase_ : List[str] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__A , max_length=128 )
UpperCAmelCase_ : str = inputs.input_ids
UpperCAmelCase_ : int = inputs.attention_mask
UpperCAmelCase_ : Tuple = outputs.input_ids
UpperCAmelCase_ : Dict = outputs.input_ids.copy()
UpperCAmelCase_ : Optional[Any] = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
UpperCAmelCase_ : List[Any] = outputs.attention_mask
assert all(len(__A ) == 512 for x in inputs.input_ids )
assert all(len(__A ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(__snake_case : Union[str, Any] ):
UpperCAmelCase_ : Dict = pred.label_ids
UpperCAmelCase_ : Any = pred.predictions
# all unnecessary tokens are removed
UpperCAmelCase_ : str = tokenizer.batch_decode(__A , skip_special_tokens=__A )
UpperCAmelCase_ : Any = tokenizer.batch_decode(__A , skip_special_tokens=__A )
UpperCAmelCase_ : str = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__A ) )] ) / len(__A )
return {"accuracy": accuracy}
# map train dataset
UpperCAmelCase_ : Tuple = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
UpperCAmelCase_ : int = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
UpperCAmelCase_ : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ : List[str] = SeqaSeqTrainingArguments(
output_dir=__A , per_device_train_batch_size=__A , per_device_eval_batch_size=__A , predict_with_generate=__A , evaluation_strategy='''steps''' , do_train=__A , do_eval=__A , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCAmelCase_ : Dict = SeqaSeqTrainer(
model=__A , args=__A , compute_metrics=_compute_metrics , train_dataset=__A , eval_dataset=__A , tokenizer=__A , )
# start training
trainer.train()
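
# A standalone sketch of the label-masking step inside the mapping function above:
# padding positions are replaced with -100 so PyTorch's cross-entropy loss ignores
# them when the loss is computed.
def mask_pad_labels(label_ids, pad_token_id):
    return [[-100 if token == pad_token_id else token for token in seq] for seq in label_ids]

print(mask_pad_labels([[5, 7, 0, 0]], pad_token_id=0))  # [[5, 7, -100, -100]]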
| 703
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case_ ( __lowercase , __lowercase ):
# Load checkpoint
UpperCAmelCase_ : Tuple = torch.load(__lowercase , map_location='''cpu''' )
UpperCAmelCase_ : Optional[int] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCAmelCase_ : str = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCAmelCase_ : Tuple = v
else:
UpperCAmelCase_ : Union[str, Any] = v
UpperCAmelCase_ : int = chkpt['''params''']
UpperCAmelCase_ : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__lowercase , (torch.FloatTensor, numpy.ndarray) )}
UpperCAmelCase_ : int = chkpt['''dico_word2id''']
UpperCAmelCase_ : List[Any] = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 1_3 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(__lowercase , __lowercase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
print(F'''Save vocab file to {pytorch_config_dump_path}''' )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowercase , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
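
# A standalone sketch of the vocabulary rewrite rule used above: entries without the
# BPE continuation marker `@@` get a `</w>` end-of-word suffix (the first few indices,
# reserved for special tokens, are left untouched) and `@@` markers are stripped.
def remap_xlm_vocab(vocab):
    return {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

print(remap_xlm_vocab({"<s>": 0, "hel@@": 14, "lo": 15}))  # {'<s>': 0, 'hel': 14, 'lo</w>': 15}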
| 641
| 0
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 0
if start < end:
UpperCAmelCase_ : Union[str, Any] = randint(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase_ : str = a[end]
UpperCAmelCase_ : List[str] = a[pivot]
UpperCAmelCase_ : Tuple = temp
UpperCAmelCase_ : int = _in_place_partition(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
count += _in_place_quick_sort(_lowerCamelCase , _lowerCamelCase , p - 1 )
count += _in_place_quick_sort(_lowerCamelCase , p + 1 , _lowerCamelCase )
return count
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : Optional[int] = randint(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase_ : List[Any] = a[end]
UpperCAmelCase_ : Optional[int] = a[pivot]
UpperCAmelCase_ : List[Any] = temp
UpperCAmelCase_ : Union[str, Any] = start - 1
for index in range(_lowerCamelCase , _lowerCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCAmelCase_ : Optional[int] = new_pivot_index + 1
UpperCAmelCase_ : Optional[Any] = a[new_pivot_index]
UpperCAmelCase_ : Tuple = a[index]
UpperCAmelCase_ : Any = temp
UpperCAmelCase_ : List[Any] = a[new_pivot_index + 1]
UpperCAmelCase_ : Dict = a[end]
UpperCAmelCase_ : Optional[Any] = temp
return new_pivot_index + 1, count
__UpperCamelCase : str = TemporaryFile()
__UpperCamelCase : List[Any] = 100 # 100 elements are to be sorted
__UpperCamelCase : Dict = 0, 1 # mean and standard deviation
__UpperCamelCase : str = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
__UpperCamelCase : Tuple = np.load(outfile)
__UpperCamelCase : int = len(M) - 1
__UpperCamelCase : Tuple = _in_place_quick_sort(M, 0, r)
print(
    'No of comparisons for 100 elements selected from a standard normal distribution'
    ' is:'
)
print(z)
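
# The mangled identifiers above obscure the algorithm, so here is a compact, runnable
# re-statement of randomised in-place quicksort with a comparison counter -- the same
# logic, not a verbatim restoration of the original.
from random import randint

def quicksort_count(a, start, end):
    if start >= end:
        return 0
    pivot = randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]  # move the pivot value to the end
    new_pivot, count = start - 1, 0
    for i in range(start, end):
        count += 1  # one comparison per scanned element
        if a[i] < a[end]:
            new_pivot += 1
            a[new_pivot], a[i] = a[i], a[new_pivot]
    a[new_pivot + 1], a[end] = a[end], a[new_pivot + 1]
    p = new_pivot + 1
    return count + quicksort_count(a, start, p - 1) + quicksort_count(a, p + 1, end)

data = [3, 1, 4, 1, 5, 9, 2, 6]
print(quicksort_count(data, 0, len(data) - 1), data)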
| 704
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Optional[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowerCAmelCase__( snake_case__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = ['input_ids', 'attention_mask']
A_ : int = TaTokenizer
A_ : List[int] = []
def __init__( self : Union[str, Any] , __snake_case : Tuple=None , __snake_case : List[Any]=None , __snake_case : int="</s>" , __snake_case : List[Any]="<unk>" , __snake_case : Dict="<pad>" , __snake_case : Tuple=100 , __snake_case : int=None , **__snake_case : Any , ):
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ : Optional[int] = [f'''<extra_id_{i}>''' for i in range(__snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ : Any = len(set(filter(lambda __snake_case : bool('''extra_id_''' in str(__snake_case ) ) , __snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
__snake_case , tokenizer_file=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , extra_ids=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : List[str] = False if not self.vocab_file else True
UpperCAmelCase_ : Union[str, Any] = extra_ids
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ : str = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
return max_model_length
    def save_vocabulary( self : Dict , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
    def build_inputs_with_special_tokens( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences( self : Dict , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def get_sentinel_tokens( self : Tuple ):
'''simple docstring'''
return list(
set(filter(lambda __snake_case : bool(re.search(R'''<extra_id_\d+>''' , __snake_case ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self : Tuple ):
'''simple docstring'''
return [self.convert_tokens_to_ids(__snake_case ) for token in self.get_sentinel_tokens()]
| 641
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
    def setUp( self : List[str] ):
'''simple docstring'''
if self.framework == "pytorch":
            subprocess.run(
                f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=True , )
assert hasattr(self , '''env''' )
    def create_estimator( self : Optional[Any] , instance_count : Dict ):
        '''simple docstring'''
        job_name = f'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}'''
        # distributed data settings
        distribution = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='''py36''' , )
    def save_results_as_csv( self : Tuple , job_name : Tuple ):
        '''simple docstring'''
        TrainingJobAnalytics(job_name ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
    @parameterized.expand([(2,)] )
    def test_script( self : Any , instance_count : str ):
        '''simple docstring'''
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results['''train_runtime''']
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
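# Usage sketch (an assumption, not part of this file: it presumes the package
# is importable as `transformers` and that torch is installed). The
# `_LazyModule` indirection above defers the heavy modeling import until the
# attribute is first touched:
#
# from transformers.models.x_clip import XCLIPConfig   # cheap: config only
# from transformers.models.x_clip import XCLIPModel    # now the torch code loads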
| 641
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 706
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( ProcessorMixin ):
'''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
    def __init__( self : Union[str, Any] , image_processor : List[Any]=None , tokenizer : List[str]=None , **kwargs : Optional[int] ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self : List[str] , images : Dict , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : Optional[int] , ):
        '''simple docstring'''
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''image'''] = images
        return encoded_inputs
    def get_overflowing_images( self : Union[str, Any] , images : str , overflow_to_sample_mapping : List[Any] ):
        '''simple docstring'''
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
        return images_with_overflow
    def batch_decode( self : List[Any] , *args : Optional[int] , **kwargs : List[Any] ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : str , *args : Optional[Any] , **kwargs : Union[str, Any] ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self : Optional[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
    def feature_extractor_class( self : List[str] ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
return self.image_processor_class
@property
    def feature_extractor( self : Dict ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
return self.image_processor
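# A minimal usage sketch (assumptions: PIL and a LayoutXLM checkpoint such as
# 'microsoft/layoutxlm-base' are available; with `apply_ocr=True` the image
# processor supplies the words and boxes itself):
#
# from PIL import Image
# processor = lowerCAmelCase__.from_pretrained('microsoft/layoutxlm-base')
# encoding = processor(Image.open('invoice.png').convert('RGB') , return_tensors='pt')
# print(encoding.keys())  # input_ids, bbox, attention_mask, image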
| 641
| 0
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__UpperCamelCase : List[Any] = '\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n'
__UpperCamelCase : Optional[int] = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
__UpperCamelCase : List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
    def _info( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
    def _compute( self : int , predictions : Tuple , references : Any , rouge_types : List[str]=None , use_aggregator : List[str]=True , use_stemmer : List[Any]=False ):
        '''simple docstring'''
        if rouge_types is None:
            rouge_types = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 707
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    '''simple docstring'''
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__( self : List[Any] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution( self : Tuple ):
        '''simple docstring'''
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov( self : Tuple ):
        '''simple docstring'''
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords( self : Any ):
        '''simple docstring'''
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode='''trunc''' ),
            ] , axis=1 , )
return coords
    @property
    def camera_rays( self : Optional[int] ):
        '''simple docstring'''
        batch_size , *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
    def get_camera_rays( self : Dict , coords : torch.Tensor ):
        '''simple docstring'''
        batch_size , *shape , n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def resize_image( self : Any , width : int , height : int ):
        '''simple docstring'''
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , shape=self.shape , )
def snake_case_ ( size ):
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
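# A small smoke test (a sketch; assumes only torch and numpy). The twenty pan
# cameras are batched along the inner `shape` dimension, so the ray tensor is
# [1, 20 * size * size, 2, 3]:
if __name__ == "__main__":
    camera = snake_case_(6_4 )
    print(camera.camera_rays.shape )  # torch.Size([1, 81920, 2, 3])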
| 641
| 0
|
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCAmelCase__( PipelineTool ):
'''simple docstring'''
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation
    inputs = ['image', 'text']
    outputs = ['image']
    def __init__( self : Any , *args : Optional[int] , **kwargs : int ):
        '''simple docstring'''
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )
def _lowerCamelCase ( self : Optional[int] , __snake_case : "Image" , __snake_case : str ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=lowercase_ , return_tensors='''pt''' )
    def forward( self : Any , inputs : Dict ):
        '''simple docstring'''
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self : Optional[int] , outputs : List[str] ):
        '''simple docstring'''
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
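# A minimal usage sketch (assumptions: torch, PIL and the agent runtime are
# available; 'CIDAS/clipseg-rd64-refined' above is a published CLIPSeg
# checkpoint, downloaded on first use):
#
# from PIL import Image
# tool = lowerCAmelCase__()
# mask = tool(Image.open('cat.png').convert('RGB') , 'cat')  # PIL mask image
# mask.save('cat_mask.png')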
| 708
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer( BaseTransformer ):
'''simple docstring'''
    mode = 'token-classification'
    def __init__( self : Any , hparams : Optional[Any] ):
        '''simple docstring'''
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module('''tasks''' )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
                f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
    def forward( self : Optional[int] , **inputs : Optional[Any] ):
        '''simple docstring'''
        return self.model(**inputs )
    def training_step( self : str , batch : Union[str, Any] , batch_num : Optional[Any] ):
        '''simple docstring'''
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type != "distilbert":
            inputs['''token_type_ids'''] = (
                batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs )
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data( self : Dict ):
        '''simple docstring'''
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''' , cached_features_file )
                features = torch.load(cached_features_file )
            else:
                logger.info('''Creating features from dataset file at %s''' , args.data_dir )
                examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                features = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info('''Saving features into cached file %s''' , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self : Any , mode : int , batch_size : int , shuffle : bool = False ):
        '''simple docstring'''
        cached_features_file = self._feature_file(mode )
        logger.info('''Loading features from cached file %s''' , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            all_token_type_ids = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_label_ids ) , batch_size=batch_size )
    def validation_step( self : List[Any] , batch : Dict , batch_nb : Union[str, Any] ):
        '''Compute validation'''
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type != "distilbert":
            inputs['''token_type_ids'''] = (
                batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs )
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['''labels'''].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self : List[str] , outputs : Optional[int] ):
        '''simple docstring'''
        val_loss_mean = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
        preds = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
        preds = np.argmax(preds , axis=2 )
        out_label_ids = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
        label_map = dict(enumerate(self.labels ) )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        results = {
            '''val_loss''': val_loss_mean,
            '''accuracy_score''': accuracy_score(out_label_list , preds_list ),
            '''precision''': precision_score(out_label_list , preds_list ),
            '''recall''': recall_score(out_label_list , preds_list ),
            '''f1''': f1_score(out_label_list , preds_list ),
        }
        ret = dict(results.items() )
        ret['''log'''] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self : List[str] , outputs : int ):
        '''simple docstring'''
        # when stable
        ret , preds , targets = self._eval_end(outputs )
        logs = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self : List[Any] , outputs : Tuple ):
        '''simple docstring'''
        # updating to test_epoch_end instead of deprecated test_end
        ret , predictions , targets = self._eval_end(outputs )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args( parser : str , root_dir : Optional[int] ):
        '''simple docstring'''
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            '''--task_type''' , default='''NER''' , type=str , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
        parser.add_argument(
            '''--max_seq_length''' , default=128 , type=int , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--labels''' , default='''''' , type=str , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
        parser.add_argument(
            '''--gpus''' , default=0 , type=int , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
        parser.add_argument(
            '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 641
| 0
|
from __future__ import annotations
from collections import namedtuple
def snake_case_ ( voltage , current , power ):
UpperCAmelCase_ : Optional[Any] = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
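# Example (a quick sketch): with exactly one argument equal to 0, the function
# solves for that quantity and returns it in the `result` namedtuple.
#
# >>> snake_case_(voltage=0, current=2, power=4)
# result(name='voltage', value=2.0)
# >>> snake_case_(voltage=2, current=2, power=0)
# result(name='power', value=4.0)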
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class lowerCAmelCase__( PretrainedConfig ):
'''simple docstring'''
    model_type = 'encoder-decoder'
    is_composition = True
    def __init__( self : Dict , **kwargs : Union[str, Any] ):
        '''simple docstring'''
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs( cls : Optional[Any] , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs : Union[str, Any] ):
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self : Dict ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
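# A minimal usage sketch (assumption: 'bert-base-cased' stands in for any
# encoder/decoder checkpoint pair):
#
# from transformers import AutoConfig
# enc = AutoConfig.from_pretrained('bert-base-cased')
# dec = AutoConfig.from_pretrained('bert-base-cased')
# config = lowerCAmelCase__.from_encoder_decoder_configs(enc, dec)
# assert config.decoder.is_decoder and config.decoder.add_cross_attention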
| 641
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase__( BaseImageProcessor ):
'''simple docstring'''
    model_input_names = ['pixel_values']
    def __init__( self : Dict , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs : List[str] , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self : int , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[str] , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self : Dict , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Any , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self : int , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[Any] , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self : Any , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : int , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self : List[Any] , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs : Tuple , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='''size''' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
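# A minimal usage sketch (assumes PIL and numpy; with the 224x224 defaults
# above, one RGB image comes back as a [1, 3, 224, 224] batch):
#
# import numpy as np
# from PIL import Image
# processor = lowerCAmelCase__()
# batch = processor(images=Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8)) , return_tensors='np')
# print(batch['pixel_values'].shape)  # (1, 3, 224, 224)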
| 710
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias , requires_grad=False ), v_bias) )
        state_dict[f'''vision_model.encoder.layers.{i}.self_attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name , eos_token_id=None ):
    image_size = 3_6_4 if '''coco''' in model_name else 2_2_4
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    tokenizer = (
        AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
        if '''opt''' in model_name
        else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
    )
    eos_token_id = tokenizer('''\n''' , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Name of the BLIP-2 model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
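# Hedged usage note: this conversion script is meant to be run from the CLI.
# The file name below is an assumption for illustration; only the flags are
# defined above.
#
#     python convert_blip_2_original_to_pytorch.py \
#         --model_name blip2-opt-2.7b \
#         --pytorch_dump_folder_path ./blip2-opt-2.7b \
#         --push_to_hub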
| 641
| 0
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def snake_case_ ( __lowercase ):
if not is_accelerate_available():
return method
UpperCAmelCase_ : List[str] = version.parse(accelerate.__version__ ).base_version
if version.parse(UpperCAmelCase_ ) < version.parse('''0.17.0''' ):
return method
def wrapper(self , *__lowercase , **__lowercase ):
if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
self._hf_hook.pre_forward(self )
return method(self , *__lowercase , **__lowercase )
return wrapper
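# A self-contained sketch of the gating pattern above: wrap a method with a
# pre-forward hook only when the installed dependency meets a minimum version,
# otherwise return the method unchanged. The names below are local to this
# sketch (it reuses the `version` import at the top of the file), not the
# module's real bindings.
def gate_on_version(installed: str, minimum: str = "0.17.0"):
    def decorator(method):
        if version.parse(installed) < version.parse(minimum):
            return method  # dependency too old: leave the method untouched
        def wrapper(self, *args, **kwargs):
            hook = getattr(self, "_hf_hook", None)
            if hook is not None and hasattr(hook, "pre_forward"):
                hook.pre_forward(self)  # e.g. move offloaded weights onto the execution device
            return method(self, *args, **kwargs)
        return wrapper
    return decorator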
| 711
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : int = multiprocessing.Manager()
UpperCAmelCase_ : Union[str, Any] = manager.list()
UpperCAmelCase_ : int = multiprocessing.Process(target=__lowercase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
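# Hedged usage sketch for the process-isolated checker above (assuming it is
# exposed under a name like `check_correctness`; the sample program and the
# argument order are illustrative, since the signature above is mangled):
#
#     sample = "def add(a, b):\n    return a + b\nassert add(1, 2) == 3"
#     outcome = check_correctness(sample, timeout=3.0, task_id="demo/0", completion_id=0)
#     print(outcome["passed"], outcome["result"])  # True "passed" on success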
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCAmelCase_ : str = shutil.rmtree
UpperCAmelCase_ : Tuple = os.rmdir
UpperCAmelCase_ : Dict = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCAmelCase_ : Optional[int] = {}
with swallow_io():
with time_limit(__lowercase ):
exec(__lowercase , __lowercase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'''failed: {e}''' )
# Needed for cleaning up.
UpperCAmelCase_ : Optional[int] = rmtree
UpperCAmelCase_ : Optional[Any] = rmdir
UpperCAmelCase_ : Optional[Any] = chdir
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
def signal_handler(__lowercase , __lowercase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowercase )
signal.signal(signal.SIGALRM , __lowercase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
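# Usage sketch for the SIGALRM-based limit above (Unix only; assuming the
# context manager is bound to a name like `time_limit`):
#
#     try:
#         with time_limit(2.0):
#             while True:
#                 pass  # runaway program under test
#     except TimeoutException:
#         print("aborted after 2 seconds")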
@contextlib.contextmanager
def snake_case_ ( ):
UpperCAmelCase_ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowercase ):
with contextlib.redirect_stderr(__lowercase ):
with redirect_stdin(__lowercase ):
yield
@contextlib.contextmanager
def snake_case_ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowercase ):
yield dirname
class TimeoutException( Exception ):
'''simple docstring'''
pass
class WriteOnlyStringIO( io.StringIO ):
'''simple docstring'''
def _lowerCamelCase ( self : Dict , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Dict , *__snake_case : int , **__snake_case : Any ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : int , *__snake_case : List[str] , **__snake_case : Optional[Any] ):
'''simple docstring'''
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return False
class redirect_stdin( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
A_ : Optional[Any] = 'stdin'
@contextlib.contextmanager
def snake_case_ ( __lowercase ):
if root == ".":
yield
return
UpperCAmelCase_ : Tuple = os.getcwd()
os.chdir(__lowercase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowercase )
def snake_case_ ( __lowercase=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
import os
UpperCAmelCase_ : Union[str, Any] = '''1'''
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Any = None
import shutil
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
import subprocess
UpperCAmelCase_ : Dict = None # type: ignore
UpperCAmelCase_ : Union[str, Any] = None
import sys
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
| 641
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCAmelCase__( PretrainedConfig ):
'''simple docstring'''
A_ : Union[str, Any] = 'unispeech-sat'
def __init__( self : str , __snake_case : Optional[Any]=32 , __snake_case : Union[str, Any]=768 , __snake_case : Union[str, Any]=12 , __snake_case : Optional[Any]=12 , __snake_case : str=3_072 , __snake_case : Any="gelu" , __snake_case : Dict=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Optional[Any]=0.0 , __snake_case : int=0.0 , __snake_case : Union[str, Any]=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Optional[int]=0.02 , __snake_case : str=1E-5 , __snake_case : int="group" , __snake_case : List[Any]="gelu" , __snake_case : Tuple=(512, 512, 512, 512, 512, 512, 512) , __snake_case : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , __snake_case : str=False , __snake_case : List[Any]=128 , __snake_case : Union[str, Any]=16 , __snake_case : int=False , __snake_case : Union[str, Any]=True , __snake_case : Dict=0.05 , __snake_case : Any=10 , __snake_case : Optional[Any]=2 , __snake_case : List[str]=0.0 , __snake_case : int=10 , __snake_case : Optional[Any]=0 , __snake_case : List[str]=320 , __snake_case : Dict=2 , __snake_case : str=0.1 , __snake_case : Any=100 , __snake_case : Union[str, Any]=256 , __snake_case : str=256 , __snake_case : List[Any]=0.1 , __snake_case : Dict="mean" , __snake_case : Dict=False , __snake_case : List[str]=False , __snake_case : str=256 , __snake_case : Optional[int]=(512, 512, 512, 512, 1_500) , __snake_case : Optional[Any]=(5, 3, 3, 1, 1) , __snake_case : Tuple=(1, 2, 3, 1, 1) , __snake_case : Any=512 , __snake_case : List[str]=0 , __snake_case : Tuple=1 , __snake_case : Tuple=2 , __snake_case : List[str]=504 , **__snake_case : List[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
UpperCAmelCase_ : Tuple = hidden_size
UpperCAmelCase_ : Tuple = feat_extract_norm
UpperCAmelCase_ : Any = feat_extract_activation
UpperCAmelCase_ : Dict = list(UpperCamelCase__ )
UpperCAmelCase_ : List[str] = list(UpperCamelCase__ )
UpperCAmelCase_ : Dict = list(UpperCamelCase__ )
UpperCAmelCase_ : List[Any] = conv_bias
UpperCAmelCase_ : Dict = num_conv_pos_embeddings
UpperCAmelCase_ : Optional[int] = num_conv_pos_embedding_groups
UpperCAmelCase_ : int = len(self.conv_dim )
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Tuple = hidden_dropout
UpperCAmelCase_ : List[Any] = attention_dropout
UpperCAmelCase_ : Dict = activation_dropout
UpperCAmelCase_ : Any = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : Optional[int] = layerdrop
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : List[str] = num_clusters
UpperCAmelCase_ : str = do_stable_layer_norm
UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : str = apply_spec_augment
UpperCAmelCase_ : List[str] = mask_time_prob
UpperCAmelCase_ : Optional[int] = mask_time_length
UpperCAmelCase_ : int = mask_time_min_masks
UpperCAmelCase_ : Optional[int] = mask_feature_prob
UpperCAmelCase_ : Optional[Any] = mask_feature_length
UpperCAmelCase_ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Dict = num_codevectors_per_group
UpperCAmelCase_ : str = num_codevector_groups
UpperCAmelCase_ : Optional[Any] = contrastive_logits_temperature
UpperCAmelCase_ : int = feat_quantizer_dropout
UpperCAmelCase_ : Any = num_negatives
UpperCAmelCase_ : Optional[Any] = codevector_dim
UpperCAmelCase_ : Union[str, Any] = proj_codevector_dim
UpperCAmelCase_ : Optional[int] = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Optional[Any] = ctc_loss_reduction
UpperCAmelCase_ : Tuple = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Tuple = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : int = list(UpperCamelCase__ )
UpperCAmelCase_ : int = list(UpperCamelCase__ )
UpperCAmelCase_ : Union[str, Any] = list(UpperCamelCase__ )
UpperCAmelCase_ : Any = xvector_output_dim
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
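# Minimal runnable sketch of the property above: the product of the conv
# strides, i.e. how many raw audio samples collapse into one encoder frame
# (assuming the default strides (5, 2, 2, 2, 2, 2, 2) from the signature).
_conv_stride_sketch = (5, 2, 2, 2, 2, 2, 2)
_downsampling_sketch = functools.reduce(operator.mul, _conv_stride_sketch, 1)
assert _downsampling_sketch == 320  # one frame per 320 samples (~20 ms at 16 kHz)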
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowerCAmelCase__( PretrainedConfig ):
'''simple docstring'''
A_ : int = 'falcon'
A_ : int = ['past_key_values']
def __init__( self : Optional[Any] , __snake_case : Tuple=65_024 , __snake_case : List[str]=4_544 , __snake_case : Optional[Any]=32 , __snake_case : Any=71 , __snake_case : str=1E-5 , __snake_case : List[str]=0.02 , __snake_case : List[Any]=True , __snake_case : Dict=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=None , __snake_case : List[Any]=False , __snake_case : Dict=False , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=False , __snake_case : Dict=11 , __snake_case : List[str]=11 , **__snake_case : int , ):
'''simple docstring'''
UpperCAmelCase_ : int = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''n_embed''' , __snake_case )
UpperCAmelCase_ : str = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Optional[int] = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : Tuple = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Optional[int] = alibi
UpperCAmelCase_ : Dict = new_decoder_architecture
UpperCAmelCase_ : List[Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : Tuple = parallel_attn
UpperCAmelCase_ : List[Any] = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return not self.alibi
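# Runnable sketch of the two derived properties above, using the defaults from
# the signature (hidden_size=4544, num_attention_heads=71):
_hidden_size_sketch, _num_heads_sketch = 4_544, 71
assert _hidden_size_sketch // _num_heads_sketch == 64  # per-head dimension
# The second property is simply `not alibi`: rotary position embeddings are
# used unless ALiBi positional biases are enabled.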
| 641
| 0
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__( DiffusionPipeline ):
'''simple docstring'''
def __init__( self : Tuple , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : List[str] , __snake_case : str , __snake_case : Dict , __snake_case : List[str] , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : Dict , __snake_case : Optional[Any] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self.enable_attention_slicing(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self : Union[str, Any] , __snake_case : List[str] , __snake_case : List[str] = 512 , __snake_case : Tuple = 512 , __snake_case : Union[str, Any] = 50 , __snake_case : Any = 7.5 , __snake_case : Optional[int] = None , __snake_case : int = 1 , __snake_case : int = 0.0 , __snake_case : Optional[int] = None , __snake_case : List[Any] = None , __snake_case : Union[str, Any] = "pil" , __snake_case : int = True , __snake_case : Dict = None , __snake_case : Tuple = 1 , __snake_case : Union[str, Any] = None , **__snake_case : Union[str, Any] , ):
'''simple docstring'''
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(_SCREAMING_SNAKE_CASE )}.''' )
# get prompt text embeddings
UpperCAmelCase_ : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
UpperCAmelCase_ : int = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase_ : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase_ : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase_ : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = text_embeddings.shape
UpperCAmelCase_ : Optional[int] = text_embeddings.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
UpperCAmelCase_ : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase_ : str = 42
if negative_prompt is None:
UpperCAmelCase_ : Tuple = ['''''']
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !='''
f''' {type(_SCREAMING_SNAKE_CASE )}.''' )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : int = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
''' the batch size of `prompt`.''' )
else:
UpperCAmelCase_ : Dict = negative_prompt
UpperCAmelCase_ : Dict = text_input_ids.shape[-1]
UpperCAmelCase_ : Optional[Any] = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
UpperCAmelCase_ : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ : List[str] = uncond_embeddings.shape[1]
UpperCAmelCase_ : Dict = uncond_embeddings.repeat(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 )
UpperCAmelCase_ : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase_ : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCAmelCase_ : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase_ : List[str] = torch.randn(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device='''cpu''' , dtype=_SCREAMING_SNAKE_CASE ).to(self.device )
UpperCAmelCase_ : Union[str, Any] = torch.randn(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device='''cpu''' , dtype=_SCREAMING_SNAKE_CASE ).to(
self.device )
else:
UpperCAmelCase_ : str = torch.randn(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = torch.randn(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCAmelCase_ : Optional[int] = latents_reference.to(self.device )
UpperCAmelCase_ : List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase_ : Optional[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase_ : Dict = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase_ : Dict = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase_ : List[str] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase_ : Tuple = 0 if dx < 0 else dx
UpperCAmelCase_ : Tuple = 0 if dy < 0 else dy
UpperCAmelCase_ : List[Any] = max(-dx , 0 )
UpperCAmelCase_ : List[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
UpperCAmelCase_ : Optional[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase_ : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ : Dict = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase_ : Dict = {}
if accepts_eta:
UpperCAmelCase_ : Optional[Any] = eta
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Any = self.scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# predict the noise residual
UpperCAmelCase_ : List[Any] = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : Optional[int] = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = 1 / 0.18_215 * latents
UpperCAmelCase_ : List[str] = self.vae.decode(_SCREAMING_SNAKE_CASE ).sample
UpperCAmelCase_ : int = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase_ : str = self.feature_extractor(self.numpy_to_pil(_SCREAMING_SNAKE_CASE ) , return_tensors='''pt''' ).to(
self.device )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.safety_checker(
images=_SCREAMING_SNAKE_CASE , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCAmelCase_ : List[str] = None
if output_type == "pil":
UpperCAmelCase_ : Any = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_SCREAMING_SNAKE_CASE , nsfw_content_detected=_SCREAMING_SNAKE_CASE )
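# Hedged usage sketch for this community pipeline. The model id, pipeline name
# and keyword names below are assumptions for illustration (the signature
# above is mangled); `custom_pipeline` is the standard diffusers loading hook:
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5",
#         custom_pipeline="seed_resize_stable_diffusion",
#     )
#     image = pipe("an astronaut riding a horse", height=512, width=768).images[0]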
| 713
|
def snake_case_ ( __lowercase ):
return " ".join(
''''''.join(word[::-1] ) if len(__lowercase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 641
| 0
|
'''simple docstring'''
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[int] , __snake_case : list ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = set_counts
UpperCAmelCase_ : Union[str, Any] = max(__snake_case )
UpperCAmelCase_ : List[Any] = len(__snake_case )
UpperCAmelCase_ : Optional[Any] = [1] * num_sets
UpperCAmelCase_ : Union[str, Any] = list(range(__snake_case ) )
def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.get_parent(__snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_parent(__snake_case )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Union[str, Any] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
UpperCAmelCase_ : Any = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : List[str] = src_parent
UpperCAmelCase_ : Union[str, Any] = self.set_counts[src_parent]
UpperCAmelCase_ : Tuple = max(self.max_set , __snake_case )
return True
def _lowerCamelCase ( self : Dict , __snake_case : int ):
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
UpperCAmelCase_ : Any = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
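# Usage sketch (assuming the class above is bound to a name like
# `DisjointSetUnion` and its methods to `merge` / `get_parent`; the names are
# illustrative since the originals are mangled):
#
#     dsu = DisjointSetUnion([1, 1, 1])  # three singleton sets, each of size 1
#     dsu.merge(0, 1)                    # union by rank; set counts accumulate
#     dsu.merge(1, 2)
#     print(dsu.max_set)                 # 3: size of the largest merged set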
| 714
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def snake_case_ ( __lowercase , __lowercase=False ):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
UpperCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowercase ):
UpperCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
__lowercase , batched=__lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
UpperCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowercase ):
if use_longest:
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__lowercase , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1_6 )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = Accelerator(dispatch_batches=__lowercase , split_batches=__lowercase )
UpperCAmelCase_ : int = get_dataloader(__lowercase , not dispatch_batches )
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowercase , __lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Dict = []
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = batch.values()
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ , UpperCAmelCase_ : Any = [], []
for logit, targ in logits_and_targets:
logits.append(__lowercase )
targs.append(__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.cat(__lowercase ), torch.cat(__lowercase )
return logits, targs
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=False , __lowercase=False , __lowercase=1_6 ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = get_basic_setup(__lowercase , __lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = generate_predictions(__lowercase , __lowercase , __lowercase )
assert (
len(__lowercase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__lowercase )}'''
def snake_case_ ( __lowercase = False , __lowercase = False ):
UpperCAmelCase_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_mrpc_setup(__lowercase , __lowercase )
# First do baseline
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = setup['''no''']
model.to(__lowercase )
model.eval()
for batch in dataloader:
batch.to(__lowercase )
with torch.inference_mode():
UpperCAmelCase_ : str = model(**__lowercase )
UpperCAmelCase_ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__lowercase , references=batch['''labels'''] )
UpperCAmelCase_ : Optional[int] = metric.compute()
# Then do distributed
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : Optional[int] = model(**__lowercase )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : Optional[int] = batch['''labels''']
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__lowercase , references=__lowercase )
UpperCAmelCase_ : Dict = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def snake_case_ ( ):
UpperCAmelCase_ : str = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__lowercase , __lowercase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Optional[Any] = Accelerator(split_batches=__lowercase , dispatch_batches=__lowercase )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__lowercase , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
UpperCAmelCase_ : List[Any] = Accelerator()
test_torch_metrics(__lowercase , 5_1_2 )
accelerator.state._reset_state()
def snake_case_ ( __lowercase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 641
| 0
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : int = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__UpperCamelCase : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class lowerCAmelCase__( PreTrainedTokenizerFast ):
'''simple docstring'''
A_ : int = VOCAB_FILES_NAMES
A_ : Any = PRETRAINED_VOCAB_FILES_MAP
A_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : List[Any] = LEDTokenizer
A_ : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , __snake_case : Optional[Any]=None , __snake_case : Dict=None , __snake_case : Tuple=None , __snake_case : Union[str, Any]="replace" , __snake_case : Tuple="<s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Tuple="</s>" , __snake_case : List[str]="<s>" , __snake_case : Tuple="<unk>" , __snake_case : Dict="<pad>" , __snake_case : Dict="<mask>" , __snake_case : Any=False , __snake_case : Any=True , **__snake_case : List[Any] , ):
'''simple docstring'''
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase , **__lowercase , )
UpperCAmelCase_ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowercase ) != add_prefix_space:
UpperCAmelCase_ : int = getattr(__lowercase , pre_tok_state.pop('''type''' ) )
UpperCAmelCase_ : Tuple = add_prefix_space
UpperCAmelCase_ : List[Any] = pre_tok_class(**__lowercase )
UpperCAmelCase_ : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase_ : Tuple = '''post_processor'''
UpperCAmelCase_ : str = getattr(self.backend_tokenizer , __lowercase , __lowercase )
if tokenizer_component_instance:
UpperCAmelCase_ : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_ : List[str] = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase_ : Tuple = tuple(state['''cls'''] )
UpperCAmelCase_ : Tuple = False
if state.get('''add_prefix_space''' , __lowercase ) != add_prefix_space:
UpperCAmelCase_ : str = add_prefix_space
UpperCAmelCase_ : Union[str, Any] = True
if state.get('''trim_offsets''' , __lowercase ) != trim_offsets:
UpperCAmelCase_ : int = trim_offsets
UpperCAmelCase_ : Union[str, Any] = True
if changes_to_apply:
UpperCAmelCase_ : str = getattr(__lowercase , state.pop('''type''' ) )
UpperCAmelCase_ : Optional[Any] = component_class(**__lowercase )
setattr(self.backend_tokenizer , __lowercase , __lowercase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def mask_token ( self : str ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token ( self : Optional[int] , __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else value
UpperCAmelCase_ : List[str] = value
def _lowerCamelCase ( self : Any , *__snake_case : List[Any] , **__snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = kwargs.get('''is_split_into_words''' , __lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__lowercase , **__lowercase )
def _lowerCamelCase ( self : int , *__snake_case : Union[str, Any] , **__snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = kwargs.get('''is_split_into_words''' , __lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__lowercase , **__lowercase )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : str , __snake_case : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
def _lowerCamelCase ( self : List[str] , __snake_case : int , __snake_case : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self : int , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Union[Dict[str, EncodedInput], BatchEncoding] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = super()._pad(
encoded_inputs=__lowercase , max_length=__lowercase , padding_strategy=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , )
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ : Any = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase_ : Union[str, Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(__lowercase )
if needs_to_be_padded:
UpperCAmelCase_ : Any = len(__lowercase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ : Dict = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ : Dict = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
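# Sketch of the global-attention padding rule above, with illustrative values:
# when inputs carry a `global_attention_mask`, it is padded with -1 ("local
# attention") rather than 0 ("do not attend") to match the padded length.
#
#     enc = {"input_ids": [0, 713, 2], "global_attention_mask": [1, 0, 0]}
#     # after right-padding to length 5:
#     #   input_ids             -> [0, 713, 2, <pad>, <pad>]
#     #   global_attention_mask -> [1, 0, 0, -1, -1]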
| 715
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 0
|
__UpperCamelCase : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
UpperCAmelCase_ : List[Any] = (
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
F'''Valid values are: {', '.join(ENERGY_CONVERSION )}'''
)
raise ValueError(UpperCAmelCase_ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
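# Runnable sketch of the same conversion rule with a trimmed factor table
# (factors copied from ENERGY_CONVERSION above): express the value in joules,
# then rescale to the target unit.
_joule_per_unit_sketch = {"joule": 1.0, "kilojoule": 1_000.0, "kilowatthour": 3_600_000.0}

def _convert_energy_sketch(from_type: str, to_type: str, value: float) -> float:
    return value * _joule_per_unit_sketch[from_type] / _joule_per_unit_sketch[to_type]

assert _convert_energy_sketch("kilojoule", "joule", 1) == 1_000.0
assert _convert_energy_sketch("joule", "kilowatthour", 3_600_000) == 1.0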
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
'''simple docstring'''
def __init__( self : List[str] , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = data
UpperCAmelCase_ : List[Any] = [0X67_45_23_01, 0Xef_cd_ab_89, 0X98_ba_dc_fe, 0X10_32_54_76, 0Xc3_d2_e1_f0]
@staticmethod
def _lowerCamelCase ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0Xff_ff_ff_ff
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
UpperCAmelCase_ : Union[str, Any] = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def _lowerCamelCase ( self : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Any = list(struct.unpack('''>16L''' , __snake_case ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCAmelCase_ : str = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.padding()
UpperCAmelCase_ : str = self.split_blocks()
for block in self.blocks:
UpperCAmelCase_ : Any = self.expand_block(__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
UpperCAmelCase_ : Optional[Any] = (b & c) | ((~b) & d)
UpperCAmelCase_ : Optional[Any] = 0X5a_82_79_99
elif 20 <= i < 40:
UpperCAmelCase_ : List[Any] = b ^ c ^ d
UpperCAmelCase_ : str = 0X6e_d9_eb_a1
elif 40 <= i < 60:
UpperCAmelCase_ : str = (b & c) | (b & d) | (c & d)
UpperCAmelCase_ : Optional[int] = 0X8f_1b_bc_dc
elif 60 <= i < 80:
UpperCAmelCase_ : Union[str, Any] = b ^ c ^ d
UpperCAmelCase_ : Dict = 0Xca_62_c1_d6
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = (
self.rotate(__snake_case , 5 ) + f + e + k + expanded_block[i] & 0Xff_ff_ff_ff,
a,
self.rotate(__snake_case , 30 ),
c,
d,
)
UpperCAmelCase_ : Optional[Any] = (
self.h[0] + a & 0Xff_ff_ff_ff,
self.h[1] + b & 0Xff_ff_ff_ff,
self.h[2] + c & 0Xff_ff_ff_ff,
self.h[3] + d & 0Xff_ff_ff_ff,
self.h[4] + e & 0Xff_ff_ff_ff,
)
return ("{:08x}" * 5).format(*self.h )
def snake_case_ ( ):
UpperCAmelCase_ : Tuple = B'''Test String'''
assert SHAaHash(__lowercase ).final_hash() == hashlib.sha1(__lowercase ).hexdigest() # noqa: S324
def snake_case_ ( ):
UpperCAmelCase_ : int = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
UpperCAmelCase_ : List[Any] = parser.parse_args()
UpperCAmelCase_ : Optional[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
UpperCAmelCase_ : List[str] = f.read()
else:
UpperCAmelCase_ : Tuple = bytes(__lowercase , '''utf-8''' )
print(SHAaHash(__lowercase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 641
| 0
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
'''simple docstring'''
def __init__( self : int , __snake_case : Optional[Any] , __snake_case : int=2 , __snake_case : Optional[Any]=3 , __snake_case : Optional[int]=4 , __snake_case : Optional[Any]=2 , __snake_case : str=7 , __snake_case : Optional[Any]=True , __snake_case : int=True , __snake_case : List[Any]=True , __snake_case : Optional[int]=True , __snake_case : Tuple=99 , __snake_case : Any=36 , __snake_case : Dict=3 , __snake_case : List[Any]=4 , __snake_case : Optional[Any]=37 , __snake_case : str="gelu" , __snake_case : str=0.1 , __snake_case : Any=0.1 , __snake_case : Tuple=512 , __snake_case : Tuple=16 , __snake_case : str=2 , __snake_case : str=0.02 , __snake_case : Dict=6 , __snake_case : int=6 , __snake_case : List[str]=3 , __snake_case : Optional[Any]=4 , __snake_case : Tuple=None , __snake_case : Union[str, Any]=1_000 , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Optional[int] = patch_size
UpperCAmelCase_ : List[str] = text_seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : Union[str, Any] = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : List[str] = use_labels
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : List[str] = type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = coordinate_size
UpperCAmelCase_ : int = shape_size
UpperCAmelCase_ : Any = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : str = scope
UpperCAmelCase_ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase_ : List[Any] = text_seq_length
UpperCAmelCase_ : str = (image_size // patch_size) ** 2 + 1
UpperCAmelCase_ : str = self.text_seq_length + self.image_seq_length
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ : int = bbox[i, j, 3]
UpperCAmelCase_ : Any = bbox[i, j, 1]
UpperCAmelCase_ : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ : Dict = bbox[i, j, 2]
UpperCAmelCase_ : Dict = bbox[i, j, 0]
UpperCAmelCase_ : Union[str, Any] = t
UpperCAmelCase_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = None
if self.use_input_mask:
UpperCAmelCase_ : List[str] = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Any = None
if self.use_labels:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase_ : Optional[Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _lowerCamelCase ( self : Any , __snake_case : Dict , __snake_case : Tuple , __snake_case : Dict , __snake_case : str , __snake_case : int , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = LayoutLMvaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
# text + image
UpperCAmelCase_ : Dict = model(__snake_case , pixel_values=__snake_case )
UpperCAmelCase_ : str = model(
__snake_case , bbox=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
UpperCAmelCase_ : str = model(__snake_case , bbox=__snake_case , pixel_values=__snake_case , token_type_ids=__snake_case )
UpperCAmelCase_ : Tuple = model(__snake_case , bbox=__snake_case , pixel_values=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase_ : Optional[Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase_ : Dict = model(pixel_values=__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.num_labels
UpperCAmelCase_ : Any = LayoutLMvaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(
__snake_case , bbox=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        '''simple docstring'''
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        '''simple docstring'''
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        '''simple docstring'''
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict['''labels'''] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict['''start_positions'''] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict['''end_positions'''] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='''pt''').pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''timesformer'''
    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
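
# Usage sketch (hedged: assumes the upstream `transformers` API that this config
# mirrors): `attention_type` selects how space and time attention are factorized,
# one of 'divided_space_time' (default), 'space_only', or 'joint_space_time'.
#
#     from transformers import TimesformerConfig, TimesformerModel
#     config = TimesformerConfig(num_frames=16, attention_type="joint_space_time")
#     model = TimesformerModel(config)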
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
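
# Lazy-import sketch for the `_import_structure` mapping above (assuming the standard
# `transformers` layout this module comes from): importing the package itself is cheap,
# and the heavy torch/tf submodules in the mapping are only loaded on first access:
#
#     from transformers import DebertaModel   # resolves modeling_deberta lazily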
import math

import qiskit


def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be non-negative.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less than or equal to 2.")
    # build registers
    quantum_register = qiskit.QuantumRegister(4, "qr")
    classical_register = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(quantum_register, classical_register)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], classical_register)  # measure the last two qubits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
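    # Decoding sketch (an assumption consistent with qiskit's bit ordering: the
    # printed bitstring lists cr[1] then cr[0], so the carry-out from qubit 3 comes
    # first and the sum from qubit 2 second).
    counts = quantum_full_adder(1, 1, 1)
    bitstring = max(counts, key=counts.get)  # most frequent outcome, e.g. '11'
    carry_out, sum_bit = int(bitstring[0]), int(bitstring[1])
    print(f"decoded: sum={sum_bit}, carry_out={carry_out}")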
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=50_257 , n_positions=1_024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
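
# Alias sketch for `attribute_map` above (hedged: relies on `PretrainedConfig`'s
# attribute-mapping behavior): canonical names are transparently redirected to the
# GPT-2-style fields, e.g.
#
#     config = GPTBigCodeConfig(n_embd=768)
#     assert config.hidden_size == 768      # reads n_embd via attribute_map
#     config.num_hidden_layers = 24         # writes n_layer via attribute_map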
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : str = {}
class LlamaConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=32_000 , hidden_size=4_096 , intermediate_size=11_008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2_048 , initializer_range=0.02 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
    def _rope_scaling_validation(self):
        '''simple docstring'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f'''got {self.rope_scaling}''')
        rope_scaling_type = self.rope_scaling.get('''type''', None)
        rope_scaling_factor = self.rope_scaling.get('''factor''', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''')
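
# Shape sketch of a dict `_rope_scaling_validation` accepts (an assumption matching
# the checks above): exactly two keys, a strategy in {'linear', 'dynamic'} and a
# float factor strictly greater than 1.
_example_rope_scaling = {"type": "linear", "factor": 2.0}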
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
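
# Illustrative CLI sketch (file names are placeholders): `fire.Fire` maps the
# function's positional and keyword arguments onto command-line flags, e.g.
#
#     python rouge_cli.py test_generations.txt test.target --save_path rouge.json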
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    '''simple docstring'''
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    '''simple docstring'''
    return_name = '''generated'''
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
    def _sanitize_parameters( self , return_tensors=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params['''return_type'''] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['''clean_up_tokenization_spaces'''] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''')
            generate_kwargs['''eos_token_id'''] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        '''simple docstring'''
        return True
    def _parse_and_tokenize(self, *args, truncation):
        '''simple docstring'''
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ''''''
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''')
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f''' `args[0]`: {args[0]} have the wrong format. It should be either of type `str` or type `list`''')
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        '''simple docstring'''
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        '''simple docstring'''
        if self.framework == "pt":
            in_b, input_length = model_inputs['''input_ids'''].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs['''input_ids''']).numpy()
        generate_kwargs['''min_length'''] = generate_kwargs.get('''min_length''', self.model.config.min_length)
        generate_kwargs['''max_length'''] = generate_kwargs.get('''max_length''', self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs['''min_length'''], generate_kwargs['''max_length'''])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        '''simple docstring'''
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f'''{self.return_name}_token_ids''': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f'''{self.return_name}_text''': self.tokenizer.decode(
                        output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces,)
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    '''simple docstring'''
    return_name = '''summary'''
    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        return super().__call__(*args, **kwargs)
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        '''simple docstring'''
        if max_length < min_length:
            logger.warning(f'''Your min_length={min_length} must be smaller than your max_length={max_length}.''')
        if input_length < max_length:
            logger.warning(
                f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
                '''a summarization task, where outputs shorter than the input are typically wanted, you might '''
                f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''')
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    '''simple docstring'''
    return_name = '''translation'''
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        '''simple docstring'''
        if input_length > 0.9 * max_length:
            logger.warning(
                f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
                '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''')
        return True
    def _parse_and_tokenize(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        '''simple docstring'''
        if getattr(self.tokenizer, '''_build_translation_inputs''', None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        '''simple docstring'''
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params['''src_lang'''] = src_lang
        if tgt_lang is not None:
            preprocess_params['''tgt_lang'''] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get('''task''', self.task)
            items = task.split('''_''')
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params['''src_lang'''] = items[1]
                preprocess_params['''tgt_lang'''] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        return super().__call__(*args, **kwargs)
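
# Task-name sketch for the backward-compatibility branch in `_sanitize_parameters`
# above: a task id of the form `translation_XX_to_YY` splits on '_' into four items,
# with the source and target languages at indices 1 and 3.
_items = "translation_en_to_fr".split("_")
assert len(_items) == 4 and (_items[1], _items[3]) == ("en", "fr")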
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=2 , num_choices=4 , summary_type="last" , use_proj=True , scope=None , bos_token_id=0 ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
        choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
        '''simple docstring'''
        return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels,)
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['''start_positions'''] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict['''end_positions'''] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)
    def test_xlm_lm_head(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)
    def test_xlm_simple_qa(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)
    def test_xlm_qa(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)
    def test_xlm_sequence_classif(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)
    def test_xlm_token_classif(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)
    def test_xlm_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        '''simple docstring'''
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        '''simple docstring'''
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states),)
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states),)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        '''simple docstring'''
        model = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
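    # Shape sketch for the sliding windows built above: with look_back=10 and
    # forward_days=5, every x sample holds 10 consecutive scaled values (one feature)
    # and every y sample holds the 5 values that follow, flattened by ravel().
    assert x_train.shape[1:] == (look_back, 1)
    assert y_train.shape[1] == forward_days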
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
            [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''rag'''
    is_composition = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=300 , retrieval_vector_size=768 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs ):
        '''simple docstring'''
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,)
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('''question_encoder''')
        question_encoder_model_type = question_encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''generator''')
        decoder_model_type = decoder_config.pop('''model_type''')
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, '''forced_eos_token_id''', None)
@classmethod
    def from_question_encoder_generator_configs( cls , question_encoder_config : PretrainedConfig , generator_config : PretrainedConfig , **kwargs ):
        '''simple docstring'''
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['''question_encoder'''] = self.question_encoder.to_dict()
        output['''generator'''] = self.generator.to_dict()
        output['''model_type'''] = self.__class__.model_type
return output
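
# Composition sketch (hedged: mirrors the upstream `transformers` usage of the
# classmethod above): a RAG config wraps one question-encoder config and one
# generator config, e.g.
#
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         dpr_question_encoder_config, bart_generator_config, n_docs=5
#     )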
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCamelCase : Optional[int] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
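

# Sketch of the slice-regression pattern used above: seed everything, run the
# pipeline once, and compare a small output window against stored reference
# values. The slice coordinates and the 1e-2 tolerance are illustrative
# assumptions, not a general rule.
def _matches_reference_slice(image: np.ndarray, expected_slice: np.ndarray, tol: float = 1e-2) -> bool:
    actual = image[0, 253:256, 253:256, -1].flatten()
    return bool(np.abs(actual - expected_slice).max() < tol)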
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
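

# Hedged invocation sketch (flags are illustrative; the accepted arguments are
# whatever TensorFlowBenchmarkArguments defines in the installed version):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128
# Programmatic use follows the same shape as main():
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   TensorFlowBenchmark(args=args).run()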
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
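
# What the lazy pattern above buys (hedged sketch, not the real _LazyModule):
# importing the package stays cheap because the heavy torch/TF submodules load
# only on first attribute access, roughly:
#   class _LazySketch(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = self._symbol_to_module[name]  # built from _import_structure
#           return getattr(importlib.import_module("." + submodule, self.__name__), name)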
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
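

# Hedged worked example for `inputs_to_logits_ratio`: with the default strides
# (5 * 2 * 2 * 2 * 2 * 2 * 2 = 320) there is one logit frame per 320 input
# samples, i.e. 50 frames for one second of 16 kHz audio (the sample rate is
# an assumption about the usual use of this model family):
def _expected_num_frames(config: "UniSpeechSatConfig", num_samples: int = 16_000) -> int:
    return num_samples // config.inputs_to_logits_ratio  # -> 50 for the defaults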
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
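
# Illustrative invocation (paths are placeholders, not real checkpoints):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq_xlmr_xl \
#       --pytorch_dump_folder_path /path/to/output \
#       --classification_head
# The dump can then be loaded back with
# XLMRobertaXLForMaskedLM.from_pretrained("/path/to/output").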
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
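

# Sketch of a post-conversion sanity check (the dump path is a placeholder):
# the config and vocab dumps written above are plain JSON, so they should
# round-trip cleanly through json.load.
def _check_dump(dump_dir: str) -> None:
    with open(dump_dir + "/" + CONFIG_NAME, encoding="utf-8") as f:
        assert isinstance(json.load(f), dict)
    with open(dump_dir + "/" + VOCAB_FILES_NAMES["vocab_file"], encoding="utf-8") as f:
        assert isinstance(json.load(f), dict)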
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
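

# Hedged usage sketch (the image path is a placeholder; the tool instantiates
# its default checkpoint lazily on first call):
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   print(tool(Image.open("/path/to/photo.jpg")))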
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
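

# Usage sketch for the sentinel helpers above (assumes the t5-small checkpoint
# is reachable; sentinel order is not guaranteed because a set is used):
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   tok.get_sentinel_tokens()     # contains "<extra_id_0>" ... "<extra_id_99>"
#   tok.build_inputs_with_special_tokens([10, 11], [12])  # -> [10, 11, eos, 12, eos]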
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        import torch  # local import; the method is guarded by @require_torch

        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
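

# Compact usage sketch of the options exercised above (tiny test checkpoint;
# generations are gibberish by design):
#   generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#   generator("Hello I believe in", max_new_tokens=8, do_sample=False)
#   generator("Hello I believe in", stop_sequence=" fe")  # truncates at the stop string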
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string (`target`) can be constructed from
    the given list of substrings (`word_bank`)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
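

# Tiny trace of the DP above for target="ab", word_bank=["a", "b", "ab"]:
#   table[0] = [[]]                           (seed: the empty prefix)
#   i=0: "a" matches  -> table[1] += [["a"]]; "ab" matches -> table[2] += [["ab"]]
#   i=1: "b" matches  -> table[2] += [["b", "a"]]
#   after reversing each combination, all_construct("ab", ...) returns
#   [["ab"], ["a", "b"]].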
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
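

# Hedged usage sketch (checkpoint name and path are placeholders; with the
# default apply_ocr=True, words/boxes come from the image processor's OCR):
#   from PIL import Image
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(Image.open("doc.png").convert("RGB"), return_tensors="pt")
#   encoding.keys()  # input_ids, bbox, attention_mask, image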
def gray_code(bit_count: int) -> list:
    """
    Takes in an integer n and returns the n-bit Gray code sequence as integers.
    """
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """
    Outputs the n-bit Gray code sequence as a list of binary strings,
    built recursively by reflection.
    """
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
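

# Cross-check sketch: the i-th reflected Gray code also equals i ^ (i >> 1),
# which gives the same sequence without recursion.
def gray_code_direct(bit_count: int) -> list:
    return [i ^ (i >> 1) for i in range(1 << bit_count)]
# For example, gray_code_direct(3) == gray_code(3) == [0, 1, 3, 2, 6, 7, 5, 4].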
if __name__ == "__main__":
import doctest
doctest.testmod()
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
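

# Usage sketch (shapes follow from the definitions above): 20 pan cameras at
# 64x64 yield one (origin, direction) pair per pixel per view:
#   cameras = create_pan_cameras(64)
#   rays = cameras.camera_rays            # [1, 20 * 64 * 64, 2, 3]
#   origins, directions = rays[..., 0, :], rays[..., 1, :]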