code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    """Builds a tiny Blenderbot config plus random encoder/decoder inputs for the tests below.

    NOTE(review): the original dump bound every assignment to a throw-away name while
    later code read the intended names (``input_ids``, ``config``, ...), raising
    NameError; the intended bindings (grounded in those read-sites) are restored here.
    """

    config_cls = BlenderbotConfig  # used by prepare_config_and_inputs_for_common
    config_updates = {}            # extra kwargs merged into the config
    hidden_act = "gelu"            # presumably the activation for the tiny model — TODO confirm

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a (config, inputs_dict) pair with random token ids ending in EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        # Force every row to end with the EOS token.
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify that decoding with cached past_key_values matches a full forward pass."""
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        # NOTE(review): dtype was mangled to ``tf.inta`` in the dump; tf.int8 — confirm.
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values
        )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


# Backward-compat alias for the original (obfuscated) class name.
SCREAMING_SNAKE_CASE__ = TFBlenderbotModelTester
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in any missing attention/head masks and return the model's input dict.

    NOTE(review): the original dump gave every parameter the same placeholder name
    (a SyntaxError) and bound results to names the return dict never read; the
    parameter and variable names are restored from the dict keys and read-sites.
    """
    if attention_mask is None:
        # Mask out padding positions in the encoder input.
        # NOTE(review): dtype was mangled to ``tf.inta``; tf.int8 — confirm.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the first decoder token, mask padding afterwards.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# Backward-compat alias for the original (obfuscated) function name.
a__ = prepare_blenderbot_inputs_dict
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for the TF Blenderbot implementations.

    NOTE(review): the original dump inherited from an undefined ``__lowerCamelCase``;
    the imported mixins are the only plausible bases and are restored here.
    """

    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): attribute names reconstructed from the standard tester-mixin
    # contract (True/False/False in the dump) — confirm against the mixin.
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        # The dump itself calls ``TFBlenderbotModelTester`` here, grounding that name.
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


# Backward-compat alias for the original (obfuscated) class name.
SCREAMING_SNAKE_CASE__ = TFBlenderbotModelTest
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    """Slow integration test that generates from the 400M distilled checkpoint."""

    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        # Property must be named ``tokenizer``: the test below reads self.tokenizer.
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        # NOTE(review): ``TFAutoModelForSeqaSeqLM`` matches this file's (mangled)
        # import; it is presumably TFAutoModelForSeq2SeqLM — fix at the import site.
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )


# Backward-compat alias for the original (obfuscated) class name.
SCREAMING_SNAKE_CASE__ = TFBlenderbot400MIntegrationTests
| 645 |
import torch
from torch import nn
class A_(nn.Module):
    """Projected adaptive log-softmax (Transformer-XL style).

    Splits the vocabulary into a head ("shortlist") cluster plus tail clusters so
    that frequent tokens get a full softmax and rare tokens a cheaper two-stage one.

    NOTE(review): the original dump bound every assignment to a placeholder name
    while the code read ``self.cutoffs``, ``self._compute_logit`` etc., and all
    three methods shared one mangled name; the intended bindings (grounded in the
    read-sites) are restored here.

    Args:
        n_token: vocabulary size.
        d_embed: embedding dimension.
        d_proj: projection dimension fed to this layer.
        cutoffs: ascending cluster boundaries (exclusive of n_token).
        div_val: divisor applied to d_embed for each successive tail cluster.
        keep_order: if True, scatter per-cluster losses back to input order.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            # One extra "cluster token" per tail cluster, appended to the head softmax.
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    # No projection needed when dimensions already match.
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Linear(+optional projection) producing logits for one cluster."""
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """Return per-token NLL (if ``labels`` given, shifted by one) or log-probs."""
        if labels is not None:
            # Shift so that tokens < n predict n.
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.')
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            # Plain softmax over the whole vocabulary.
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # Construct per-cluster weights and biases.
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    # Only process the rows whose target falls in this cluster.
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out

    def log_prob(self, hidden):
        """Return full-vocabulary log-probabilities for ``hidden`` (no labels)."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # Construct per-cluster weights and biases.
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i
            return out
| 84 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import machinery: each optional backend contributes its symbols only when
# the dependency is installed. NOTE(review): the original dump bound every
# structure to a single reused name and never installed the lazy module in
# sys.modules; the standard transformers pattern is restored here.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backends load
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 658 |
from __future__ import annotations
class Matrix:
    """A 2-D matrix of ints/floats supporting determinants, inverses and arithmetic.

    NOTE(review): the original dump named the class ``A_`` while its own methods
    constructed ``Matrix(...)`` and read properties such as ``num_rows`` that were
    never defined; the names the code reads are restored (alias ``A_`` kept below).
    """

    def __init__(self, rows):
        """Validate *rows* as a rectangular list of int/float lists (or [] for empty)."""
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.'
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        """Return the matrix's columns as a list of lists (i.e. the transpose's rows)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        """(rows, columns) tuple."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Return the identity matrix of the same row count."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        """Laplace expansion along the first row; 0 for non-square matrices."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0])
            )
        return sum(
            self.rows[0][column] * self.cofactors().rows[0][column]
            for column in range(self.num_columns)
        )

    def is_invertable(self):
        """A matrix is invertible iff its determinant is non-zero."""
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the submatrix with *row* and *column* removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        """Matrix of minors."""
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        """Matrix of cofactors (minors with a checkerboard sign pattern)."""
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        """Adjugate divided by the determinant; raises for singular matrices."""
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row, position=None):
        """Append *row* (or insert at *position*) after validating type and length."""
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        """Append *column* (or insert at *position*) after validating type and length."""
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        # Scalar multiplication truncates to int; matrix multiplication uses dot products.
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power')
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))


# Backward-compat alias for the original (obfuscated) class name.
A_ = Matrix
# Run the module's embedded doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 84 | 0 |
def SCREAMING_SNAKE_CASE(n) -> bool:
    """Return True if *n* is a perfect cube (negative cubes included).

    The original float test ``(n ** (1/3)) ** 3 == n`` fails for values such as
    27 because of floating-point rounding (27**(1/3) is 3.0000000000000004);
    rounding the candidate root and cubing it back in integer arithmetic is exact.
    """
    num = abs(n)
    root = round(num ** (1 / 3))
    return root * root * root == num


# The demo below uses the descriptive name.
perfect_cube = SCREAMING_SNAKE_CASE

if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
| 375 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example injected into the pipeline's __call__ via @replace_example_docstring.
# NOTE(review): the dump bound both the logger and this docstring to one name,
# clobbering the logger; distinct names are restored here.
EXAMPLE_DOC_STRING = '''
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds
        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
'''

# Backward-compat alias: later code in this file references the docstring by this name.
UpperCAmelCase = EXAMPLE_DOC_STRING
def UpperCAmelCase_(height, width, scale_factor=8):
    """Map a pixel-space (height, width) to valid latent-aligned dimensions.

    Divides by ``scale_factor**2`` (rounding up), then multiplies by
    ``scale_factor`` — e.g. (768, 768) with the default factor gives (96, 96).

    NOTE(review): the original dump declared three parameters with the same name
    (a SyntaxError) and bound results to a placeholder; the names are restored
    from the read-sites in the body.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# The pipeline below calls this helper by its descriptive name.
downscale_height_and_width = UpperCAmelCase_
class A_(DiffusionPipeline):
    """Kandinsky 2.2 decoder pipeline: image embeddings -> images via U-Net + MoVQ.

    NOTE(review): the original dump inherited from an undefined ``__lowerCamelCase``
    (restored to the imported ``DiffusionPipeline``) and bound every assignment to
    a placeholder name; the intended bindings are reconstructed from the names the
    code reads.

    Args:
        unet: conditional U-Net that denoises image latents.
        scheduler: DDPM scheduler used together with ``unet``.
        movq: MoVQ decoder that turns latents into images.
    """

    def __init__(self, unet, scheduler, movq):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        # Spatial downscale factor between pixel space and MoVQ latent space.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw (or validate) initial latents and scale them by the scheduler's init sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload submodules to CPU, moving them to GPU one at a time when used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(F'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU with hooks, keeping one on GPU at a time."""
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(F'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    # NOTE(review): references the module-level docstring binding kept under this name.
    @replace_example_docstring(UpperCAmelCase)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        """Run the denoising loop and decode latents to images."""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            # Unconditional embeddings first, conditional second (split again below).
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
import math
def a(lowerCamelCase_):
    """Convert a non-negative integer to its octal representation, e.g. 65 -> "0o101".

    NOTE(review): the original dump bound results to placeholder names while the
    loop read ``num``/``octal``/``counter``; the intended bindings are restored.
    """
    num = lowerCamelCase_
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # Accumulate the octal digits in base-10 positions: digit * 10**counter.
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F"""0o{int(octal)}"""


# The demo below calls `decimal_to_octal`, so expose that name too.
decimal_to_octal = a
# NOTE(review): this demo redefines the name `a` (shadowing the converter above)
# and calls `decimal_to_octal` / `main`, neither of which is defined in this
# file — presumably the converter was originally named `decimal_to_octal` and
# this function `main`. TODO: reconcile the names before running as a script.
def a ( ):
    '''Demo: print the octal representation of a few sample numbers.'''
    print('''\n2 in octal is:''' )
    print(decimal_to_octal(2 ) ) # = 2
    print('''\n8 in octal is:''' )
    print(decimal_to_octal(8 ) ) # = 10
    print('''\n65 in octal is:''' )
    print(decimal_to_octal(65 ) ) # = 101
    print('''\n216 in octal is:''' )
    print(decimal_to_octal(216 ) ) # = 330
    print('''\n512 in octal is:''' )
    print(decimal_to_octal(512 ) ) # = 1000
    print('''\n''' )
if __name__ == "__main__":
    main()  # NOTE(review): `main` is undefined here — likely meant the demo above
| 183 |
def UpperCAmelCase_(number, digit_amount):
    """Return the fractional part of *number*.

    If ``digit_amount > 0`` the fraction is rounded to that many digits,
    otherwise it is returned as-is. (The original signature declared the same
    placeholder name for both parameters — a SyntaxError; the names are
    restored from the body's references.)
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demo of the fractional-part helper. The original called an undefined
    # `decimal_isolate`; the function defined in this file is `UpperCAmelCase_`.
    print(UpperCAmelCase_(1.53, 0))
    print(UpperCAmelCase_(35.345, 1))
    print(UpperCAmelCase_(35.345, 2))
    print(UpperCAmelCase_(35.345, 3))
    print(UpperCAmelCase_(-14.789, 3))
    print(UpperCAmelCase_(0, 2))
    print(UpperCAmelCase_(-14.123, 1))
    print(UpperCAmelCase_(-14.123, 2))
    print(UpperCAmelCase_(-14.123, 3))
| 84 | 0 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE:
    """A named value; instances compare by ``val`` (used as heap elements below)."""

    def __init__(self, name, val):
        # The original declared duplicate placeholder parameter names (a
        # SyntaxError); names restored from the body's `name`/`val` references.
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class SCREAMING_SNAKE_CASE:
    """Min-heap of node objects (anything with a ``val`` attribute and ``<``/``>``),
    with an O(1) node→index map so ``decrease_key`` runs in O(log n).

    Every method in the original was defined under one colliding placeholder
    name while bodies called ``get_parent_idx``/``sift_down``/etc.; the
    intended names are restored from those call sites.
    """

    def __init__(self, array):
        # node -> current index in self.heap (kept in sync by the sift helpers)
        self.idx_of_element = {}
        # node -> its current value (also exposed via __getitem__)
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify *array* in place (Floyd's bottom-up construction)."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node] = node.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until the min-heap property holds."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                # Keep the node→index map consistent with the swap.
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Move self.heap[idx] up until the min-heap property holds."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            ) = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum node."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        (
            self.idx_of_element[self.heap[0]],
            self.idx_of_element[self.heap[-1]],
        ) = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node] = new_value
        self.sift_up(self.idx_of_element[node])
# NOTE(review): the names below are mangled — `Node` and `MinHeap` are not
# defined in this file (both classes above are named `SCREAMING_SNAKE_CASE`),
# and every assignment reuses the single name `__lowerCAmelCase`, so the
# `r`, `b`, `a`, `x`, `e` and `my_min_heap` referenced later are undefined.
# TODO: restore the intended names before running this demo.
__lowerCAmelCase = Node('R', -1)
__lowerCAmelCase = Node('B', 6)
__lowerCAmelCase = Node('A', 3)
__lowerCAmelCase = Node('X', 1)
__lowerCAmelCase = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__lowerCAmelCase = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
    print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 466 |
from __future__ import annotations
def UpperCAmelCase_(number):
    """Return True if *number*'s decimal representation reads the same reversed."""
    n = str(number)  # original compared an undefined `n`; bind it from the argument
    return n == n[::-1]
def UpperCAmelCase_(limit=100_0000):
    """Project Euler 36: sum numbers below *limit* palindromic in base 10 AND base 2.

    The palindrome check is inlined as a private helper because the module-level
    base-10 checker shares (and is shadowed by) this function's mangled name,
    so the original call to `is_palindrome` raised NameError.
    """

    def _is_palindrome(digits):
        # Works on any string of digits/bits.
        return digits == digits[::-1]

    total = 0
    for i in range(1, limit):
        # bin(i) is '0b...'; split off the '0b' prefix before checking.
        if _is_palindrome(str(i)) and _is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this file — the solver above
    # is named `UpperCAmelCase_`. Reads the limit from stdin when run as a
    # script. TODO: reconcile the names.
    print(solution(int(str(input().strip()))))
| 84 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
# Template wrapped around a task for chat-style prompting.
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
# Hub dataset repo hosting the default agent prompts (used by the download
# helper below when no prompt/repo id is supplied).
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
# File inside the repo for each prompting mode.
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def __UpperCAmelCase(prompt_or_repo_id, agent_name, mode="run"):
    """Download and return the prompt template for an agent.

    If *prompt_or_repo_id* is None the default prompts repo is used. A value
    containing whitespace is treated as a literal prompt and returned as-is;
    otherwise it is treated as a Hub dataset repo id to fetch *mode*'s prompt
    file from. (Original signature declared `A` three times — a SyntaxError;
    names restored from the body's references.)
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID only when it does not contain any whitespace
    if re.search('''\\s''', prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type='''dataset''', user_agent={'''agent''': agent_name}
    )
    with open(prompt_file, '''r''', encoding='''utf-8''') as f:
        return f.read()
| 541 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger — referenced by the config class below (the original bound it
# to a placeholder name and then clobbered it).
logger = logging.get_logger(__name__)

# Canonical config locations for released checkpoints.
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/conditional-detr-resnet-50''': (
        '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
    ),
}
class A_(__lowerCamelCase):
    """Configuration for the Conditional DETR model.

    Stores hyper-parameters for the backbone, the encoder/decoder transformer,
    the Hungarian-matcher costs and the loss coefficients. (Restored from the
    mangled original, where every attribute assignment collapsed onto one
    local placeholder and the properties shared a single colliding name.)
    """

    model_type = """conditional_detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    # Generic attribute names expected by framework code, mapped to our fields.
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # `backbone_config` (HF backbone) and `use_timm_backbone` are exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a serialized backbone config into its config class.
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, nesting the backbone config as a dict."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class A_(__lowerCamelCase):
    """ONNX export configuration for Conditional DETR.

    (The original defined all three properties under one colliding name;
    restored to the names the ONNX export framework looks up.)
    """

    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self):
        # Dynamic axes for the exported graph's inputs.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ]
        )

    @property
    def atol_for_validation(self):
        # Absolute tolerance used when validating exported model outputs.
        return 1E-5

    @property
    def default_onnx_opset(self):
        return 12
| 84 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _snake_case(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
):
    """Consolidate a RAG checkpoint from separate generator / question-encoder parts.

    Saves the combined model and both tokenizers under *dest_dir*. (The
    original declared seven identically-named parameters — a SyntaxError;
    names restored from the body and the argparse driver below.)
    """
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check: the saved checkpoint must load back.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/')
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/')
if __name__ == "__main__":
    # CLI driver for the consolidation routine above. (The original never bound
    # `parser`/`args`/`dest_dir` — everything was assigned to one placeholder —
    # and called an undefined `consolidate`; this file's routine is `_snake_case`.)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""",
        choices=["""rag_sequence""", """rag_token"""],
        required=True,
        type=str,
        help="""RAG model type: rag_sequence, rag_token""",
    )
    parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
    parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
    parser.add_argument(
        """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
    )
    parser.add_argument(
        """--generator_tokenizer_name_or_path""",
        type=str,
        help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
    )
    parser.add_argument(
        """--question_encoder_tokenizer_name_or_path""",
        type=str,
        help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
    )
    parser.add_argument(
        """--config_name_or_path""",
        type=str,
        help=(
            """Identifier of the model config to use, if not provided, resolves to a base config for a given"""
            """ ``model_type``"""
        ),
    )
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    _snake_case(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 23 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure: the tokenizer module is only imported on first access.
# (The original assigned the dict, the symbol list and the lazy module to one
# reused placeholder name, so `_import_structure` was undefined at the bottom.)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ['''MLukeTokenizer''']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Replace this module with a proxy that resolves attributes on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (the original clobbered it by reusing one placeholder name
# for both assignments).
logger = logging.get_logger(__name__)

# Canonical config locations for released checkpoints.
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class lowerCamelCase__(__lowerCamelCase):
    """Configuration for Switch Transformers (sparse mixture-of-experts T5 variant).

    (Restored from the mangled original, whose `__init__` declared one
    duplicated parameter name — a SyntaxError — and collapsed every attribute
    assignment onto a single local.)
    """

    model_type = """switch_transformers"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__(
        self,
        vocab_size=3_2_1_2_8,
        d_model=7_6_8,
        d_kv=6_4,
        d_ff=2_0_4_8,
        expert_capacity=6_4,
        num_layers=1_2,
        num_sparse_encoder_layers=3,
        num_decoder_layers=1_2,
        num_sparse_decoder_layers=3,
        num_heads=1_2,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=3_2,
        relative_attention_max_distance=1_2_8,
        dropout_rate=0.1,
        layer_norm_epsilon=1E-6,
        router_z_loss_coef=0.0_01,
        router_aux_loss_coef=0.0_01,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # Same for the decoder.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}')
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # `feed_forward_proj` is either "{act}" or "gated-{act}".
        act_info = self.feed_forward_proj.split("""-""")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """\'gated-gelu\' or \'relu\'"""
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs
        )
| 617 |
def UpperCAmelCase_(graph):
    """Kahn's algorithm: BFS topological sort of a DAG.

    *graph* maps each vertex ``0..n-1`` to a list of its successors. Prints the
    order (or "Cycle exists") and — as a backward-compatible generalization of
    the original, which returned None unconditionally — returns the order
    list, or None when the graph contains a cycle.
    """
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    # Count incoming edges for every vertex.
    for values in graph.values():
        for successor in values:
            indegree[successor] += 1
    # Seed the queue with all sources (indegree zero).
    for vertex in range(len(graph)):
        if indegree[vertex] == 0:
            queue.append(vertex)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for successor in graph[vertex]:
            indegree[successor] -= 1
            if indegree[successor] == 0:
                queue.append(successor)
    # If not every vertex was emitted, some cycle blocked the sort.
    if cnt != len(graph):
        print('Cycle exists')
        return None
    print(topo)
    return topo
# Adjacency List of Graph
UpperCAmelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 84 | 0 |
'''simple docstring'''
from __future__ import annotations
class A:
    """A 2-D matrix of ints/floats supporting arithmetic, determinants,
    cofactors and inverses.

    (Restored from the mangled original: all methods were defined under one
    colliding name, `add_row`/`add_column` declared duplicate parameter names —
    a SyntaxError — and bodies referenced undefined `rows`/`other`/`Matrix`.)
    """

    def __init__(self, rows):
        error = TypeError(
            """Matrices must be formed from a list of zero or more lists containing at """
            """least one and the same number of values, each of which must be of type """
            """int or float."""
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            # Every row must have the same width and only numeric entries.
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        """Return the matrix's columns as a list of lists (transpose view)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Return the identity matrix of the same row count."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return A(values)

    def determinant(self):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        # Laplace expansion along the first row.
        return sum(
            self.rows[0][column] * self.cofactors().rows[0][column]
            for column in range(self.num_columns)
        )

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the submatrix with *row* and *column* removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return A(values).determinant()

    def get_cofactor(self, row, column):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return A(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        return A(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return A(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("""Only matrices with a non-zero determinant have an inverse""")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    """[""" + """. """.join([str(value) for value in row]) + """.]"""
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row, position=None):
        """Append *row*, or insert it at *position* when given."""
        type_error = TypeError("""Row must be a list containing all ints and/or floats""")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                """Row must be equal in length to the other rows in the matrix""")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        """Append *column*, or insert it at *position* when given."""
        type_error = TypeError(
            """Column must be a list containing all ints and/or floats""")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                """Column must be equal in length to the other columns in the matrix""")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, A):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("""Addition requires matrices of the same order""")
        return A(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("""Subtraction requires matrices of the same order""")
        return A(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            # NOTE: scalar products are truncated to int, as in the original.
            return A(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, A):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    """The number of columns in the first matrix must """
                    """be equal to the number of rows in the second""")
            return A(
                [
                    [A.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                """A Matrix can only be multiplied by an int, float, or another matrix""")

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("""A Matrix can only be raised to the power of an int""")
        if not self.is_square:
            raise ValueError("""Only square matrices can be raised to a power""")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                """Only invertable matrices can be raised to a negative power""")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script (none are currently
    # defined in the visible docstrings, so this is a no-op today).
    import doctest

    doctest.testmod()
| 325 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84 | 0 |
from jiwer import compute_measures
import datasets
# Metric metadata strings. The class below (and its decorator) reference
# `_CITATION` / `_DESCRIPTION` / `_KWARGS_DESCRIPTION`; the original assigned
# all three to one placeholder name, leaving those references undefined.
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowerCAmelCase(datasets.Metric):
    """Word error rate metric backed by jiwer.

    (The original defined both methods under one colliding name `_a`;
    restored to the `datasets.Metric` contract names `_info`/`_compute`.)
    """

    def _info(self):
        # Declares the metric schema and metadata for the `datasets` library.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the corpus-level WER of *predictions* against *references*."""
        if concatenate_texts:
            # jiwer concatenates internally when handed the full lists
            # (truth first, hypothesis second).
            return compute_measures(references, predictions)["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = compute_measures(reference, prediction)
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A_ ( BaseOutput ):
    """Output of the Flax ControlNet forward pass.

    Fixes: both fields were obfuscated to the single name ``_UpperCamelCase``
    (the second annotation silently replaced the first, leaving one field),
    and the base class was the undefined name ``__lowerCamelCase``. Field
    names are restored from the keyword arguments used at the construction
    site at the bottom of this file; the base is the ``BaseOutput`` imported
    above.
    """

    # Per-resolution residual feature maps for the UNet's down blocks.
    down_block_res_samples: jnp.ndarray
    # Residual feature map for the UNet's mid block.
    mid_block_res_sample: jnp.ndarray
class A_ ( nn.Module ):
    """Embeds the ControlNet conditioning image into the UNet feature space.

    Fixes (review): the original dataclass fields all shared one duplicated
    name; flax's ``setup`` had been renamed (so it was never called); and the
    submodule assignments in ``setup`` dropped their ``self.`` targets while
    ``__call__`` still read ``self.conv_in`` / ``self.blocks`` /
    ``self.conv_out``. Field and attribute names are restored from those
    reads; ``jnp.floataa`` (undefined) is restored to ``jnp.float32``.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            # A resolution-preserving 3x3 conv followed by a strided 3x3 conv
            # that halves the spatial resolution.
            blocks.append(
                nn.Conv(
                    channel_in,
                    kernel_size=(3, 3),
                    padding=((1, 1), (1, 1)),
                    dtype=self.dtype,
                )
            )
            blocks.append(
                nn.Conv(
                    channel_out,
                    kernel_size=(3, 3),
                    strides=(2, 2),
                    padding=((1, 1), (1, 1)),
                    dtype=self.dtype,
                )
            )
        self.blocks = blocks
        # Zero-initialized so the ControlNet branch starts as a no-op.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class A_ ( nn.Module , __lowerCamelCase , __lowerCamelCase ):
    """Flax ControlNet-style model (heavily damaged by obfuscation).

    NOTE(review): every config field below is annotated under the single name
    ``_UpperCamelCase`` (only the last annotation survives), the two setup
    methods share one name, local/attribute assignment targets were replaced
    with ``lowercase``, and the extra base classes are the undefined name
    ``__lowerCamelCase``. The method bodies still *read* the intended field
    names (``self.sample_size``, ``self.in_channels``, ``self.dtype``, ...),
    which documents what the fields were — confirm against the upstream
    ``FlaxControlNetModel`` before relying on this code.
    """
    # Fields in declaration order (names inferred from reads below — TODO
    # confirm): sample_size, in_channels, down_block_types,
    # only_cross_attention, block_out_channels, layers_per_block,
    # attention_head_dim, num_attention_heads, cross_attention_dim, dropout,
    # use_linear_projection, dtype, flip_sin_to_cos, freq_shift,
    # controlnet_conditioning_channel_order, conditioning_embedding_out_channels.
    _UpperCamelCase : int = 32
    _UpperCamelCase : int = 4
    _UpperCamelCase : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _UpperCamelCase : Union[bool, Tuple[bool]] = False
    _UpperCamelCase : Tuple[int] = (320, 640, 1280, 1280)
    _UpperCamelCase : int = 2
    _UpperCamelCase : Union[int, Tuple[int]] = 8
    _UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
    _UpperCamelCase : int = 1280
    _UpperCamelCase : float = 0.0
    _UpperCamelCase : bool = False
    _UpperCamelCase : jnp.dtype = jnp.floataa
    _UpperCamelCase : bool = True
    _UpperCamelCase : int = 0
    _UpperCamelCase : str = "rgb"
    _UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
    # NOTE(review): presumably `init_weights(self, rngs)` — builds dummy
    # sample/timestep/context/conditioning tensors and calls `self.init`.
    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        # init input tensors
        lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
        lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
        lowercase = jnp.ones((1,) , dtype=jnp.intaa )
        lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
        lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
        lowercase , lowercase = jax.random.split(snake_case )
        lowercase = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(snake_case , snake_case , snake_case , snake_case , snake_case )["params"]
    # NOTE(review): presumably flax's `setup` — builds time embedding, the
    # conditioning embedder, down blocks and the zero-initialized controlnet
    # projection convs. Assignment targets (self.conv_in, self.time_proj,
    # self.down_blocks, ...) were destroyed; __call__ below reads them.
    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = self.block_out_channels
        lowercase = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowercase = self.num_attention_heads or self.attention_head_dim
        # input
        lowercase = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        lowercase = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        lowercase = FlaxTimestepEmbedding(snake_case , dtype=self.dtype )
        lowercase = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        lowercase = self.only_cross_attention
        if isinstance(snake_case , snake_case ):
            lowercase = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(snake_case , snake_case ):
            lowercase = (num_attention_heads,) * len(self.down_block_types )
        # down
        lowercase = []
        lowercase = []
        lowercase = block_out_channels[0]
        lowercase = nn.Conv(
            snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(snake_case )
        for i, down_block_type in enumerate(self.down_block_types ):
            lowercase = output_channel
            lowercase = block_out_channels[i]
            lowercase = i == len(snake_case ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowercase = FlaxCrossAttnDownBlockaD(
                    in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                lowercase = FlaxDownBlockaD(
                    in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(snake_case )
            for _ in range(self.layers_per_block ):
                lowercase = nn.Conv(
                    snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(snake_case )
            if not is_final_block:
                lowercase = nn.Conv(
                    snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(snake_case )
        lowercase = down_blocks
        lowercase = controlnet_down_blocks
        # mid
        lowercase = block_out_channels[-1]
        lowercase = FlaxUNetMidBlockaDCrossAttn(
            in_channels=snake_case , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        lowercase = nn.Conv(
            snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    # Forward pass: returns (down_block_res_samples, mid_block_res_sample)
    # either as a tuple or wrapped in the output dataclass.
    # NOTE(review): positional params were presumably (sample, timesteps,
    # encoder_hidden_states, controlnet_cond, conditioning_scale, return_dict,
    # train) — inferred from the body's reads; confirm upstream.
    def __call__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1.0 , snake_case = True , snake_case = False , ):
        lowercase = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            lowercase = jnp.flip(snake_case , axis=1 )
        # 1. time
        if not isinstance(snake_case , jnp.ndarray ):
            lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowercase = timesteps.astype(dtype=jnp.floataa )
            lowercase = jnp.expand_dims(snake_case , 0 )
        lowercase = self.time_proj(snake_case )
        lowercase = self.time_embedding(snake_case )
        # 2. pre-process
        lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
        lowercase = self.conv_in(snake_case )
        lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
        lowercase = self.controlnet_cond_embedding(snake_case )
        sample += controlnet_cond
        # 3. down
        lowercase = (sample,)
        for down_block in self.down_blocks:
            if isinstance(snake_case , snake_case ):
                lowercase , lowercase = down_block(snake_case , snake_case , snake_case , deterministic=not train )
            else:
                lowercase , lowercase = down_block(snake_case , snake_case , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        lowercase = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train )
        # 5. contronet blocks
        lowercase = ()
        for down_block_res_sample, controlnet_block in zip(snake_case , self.controlnet_down_blocks ):
            lowercase = controlnet_block(snake_case )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        lowercase = controlnet_down_block_res_samples
        lowercase = self.controlnet_mid_block(snake_case )
        # 6. scaling
        lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=snake_case , mid_block_res_sample=snake_case )
| 84 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def _lowerCamelCase ( __a=None ):
    # Builds the argparse parser for the `accelerate env` command.
    # NOTE(review): the body reads `subparsers` but the parameter is named
    # `__a`, so calling this raises NameError; the parameter was presumably
    # `subparsers` before obfuscation. The results of the two branches are
    # assigned to `SCREAMING_SNAKE_CASE_` yet read back as `parser`, and
    # `__SCREAMING_SNAKE_CASE` (the --config_file default and the
    # set_defaults callback — presumably None and the env-command function
    # defined below) is undefined. Confirm against upstream accelerate.
    if subparsers is not None:
        SCREAMING_SNAKE_CASE_ = subparsers.add_parser('''env''' )
    else:
        SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser('''Accelerate env command''' )
    parser.add_argument(
        '''--config_file''', default=__SCREAMING_SNAKE_CASE, help='''The config file to use for the default values in the launching script.''' )
    if subparsers is not None:
        parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
    return parser
def _lowerCamelCase ( __a ):
    """Collect and print environment info for ``accelerate env``.

    ``__a`` (obfuscated parameter name kept for callers) is the parsed
    argparse namespace; only ``config_file`` is read from it. Prints a
    GitHub-issue-ready report and returns the info dict.

    Fixes: the body referenced ``args`` while the parameter was ``__a``; every
    undefined ``__SCREAMING_SNAKE_CASE`` placeholder is resolved to the value
    the surrounding lines compute (``default_config_file``, the availability
    flags, the config dict); the GPU name was computed but discarded — it is
    now recorded in the report.
    """
    args = __a
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = '''Not found'''
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
        '''`Accelerate` version''': version,
        '''Platform''': platform.platform(),
        '''Python version''': platform.python_version(),
        '''Numpy version''': np.__version__,
        '''PyTorch version (GPU?)''': F'{pt_version} ({pt_cuda_available})',
        '''PyTorch XPU available''': str(pt_xpu_available),
        '''PyTorch NPU available''': str(pt_npu_available),
        '''System RAM''': F'{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB',
    }
    if pt_cuda_available:
        # Record which GPU torch sees (the original discarded this value).
        info['''GPU type'''] = torch.cuda.get_device_name()
    print('''\nCopy-and-paste the text below in your GitHub issue\n''')
    print('''\n'''.join([F'- {prop}: {val}' for prop, val in info.items()]))
    print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''')
    accelerate_config_str = (
        '''\n'''.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else F'\t{accelerate_config}'
    )
    print(accelerate_config_str)
    info['''`Accelerate` configs'''] = accelerate_config
    return info
def _lowerCamelCase ( ):
    # CLI entry point: build the parser, parse args, run the env command.
    # NOTE(review): `env_command_parser` and `env_command` are not defined in
    # this file (every function here was obfuscated to `_lowerCamelCase`),
    # both results are bound to `SCREAMING_SNAKE_CASE_` but read as `parser`,
    # and `__SCREAMING_SNAKE_CASE` (presumably the parsed args) is undefined —
    # running this raises NameError.
    SCREAMING_SNAKE_CASE_ = env_command_parser()
    SCREAMING_SNAKE_CASE_ = parser.parse_args()
    env_command(__SCREAMING_SNAKE_CASE )
    return 0
if __name__ == "__main__":
    # Script entry point. NOTE(review): `main` is not defined in this file
    # (the driver above was obfuscated to `_lowerCamelCase`), so this raises
    # NameError when executed directly.
    raise SystemExit(main())
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# Environment-variable-style flag (a string, not a bool) used by the
# accelerate test harness; the original constant name was lost to obfuscation.
UpperCAmelCase = '''true'''
def UpperCAmelCase_ ( accelerator , num_samples=82 , batch_size=16 ):
    """Build a regression model/dataset pair prepared with ``accelerator``.

    Fix: the original declared one obfuscated name for all three parameters —
    a SyntaxError (duplicate argument) — so the names are restored from how
    each value is used (``length=``, ``batch_size=``, ``.device``).

    Returns:
        (reference_model, prepared_ddp_model, prepared_dataloader)
    """
    set_seed(42)
    model = RegressionModel()
    # Keep an unprepared copy as the single-process reference.
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def UpperCAmelCase_ ( accelerator , use_longest=False ):
    """Build a tokenized GLUE/MRPC validation dataloader (batch size 16).

    Fix: both parameters shared one obfuscated name (a SyntaxError), and the
    tokenizer's ``truncation``/``max_length`` placeholders were undefined;
    values are restored from the upstream accelerate test this mirrors.
    """
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(examples):
        # Tokenize sentence pairs; defer padding to the collate function.
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def UpperCAmelCase_ ( dispatch_batches , split_batches ):
    """Prepare the MRPC model/dataloader both with and without DDP wrapping.

    Fix: the two parameters shared one obfuscated name (a SyntaxError); names
    are restored from the keyword arguments they feed to ``Accelerator``.
    NOTE(review): ``get_dataloader`` is the helper defined just above, which
    obfuscation renamed to ``UpperCAmelCase_`` in this file.

    Returns:
        ({"ddp": [model, dataloader, device], "no": [...]}, accelerator)
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def UpperCAmelCase_ ( model , dataloader , accelerator ):
    """Run ``model`` over ``dataloader`` and gather logits/targets.

    Fix: all three parameters shared one obfuscated name (a SyntaxError);
    names are restored from how each value is used in the body.

    Returns:
        (logits, targets): each the concatenation across all batches, after
        ``accelerator.gather_for_metrics`` has synchronized processes.
    """
    logits_and_targets = []
    for batch in dataloader:
        # Each batch is a mapping of (input, target) values.
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def UpperCAmelCase_ ( accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    """Check that gathering yields exactly ``num_samples`` predictions.

    Fix: all five parameters shared one obfuscated name (a SyntaxError);
    names and the asserted quantity are restored from the upstream accelerate
    test this mirrors (the ``get_basic_setup``/``generate_predictions`` call
    sites below kept their original names through obfuscation).
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), F'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'''
def UpperCAmelCase_ ( dispatch_batches = False , split_batches = False ):
    """Compare MRPC metrics computed single-process vs. distributed.

    Fix: the two parameters shared one obfuscated name (a SyntaxError), and
    every undefined ``__SCREAMING_SNAKE_CASE`` placeholder is resolved to the
    value the surrounding lines compute (device, predictions, references).
    """
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['labels']
        # Gather across processes so every rank scores the full set.
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def UpperCAmelCase_ ( ):
    """Driver for the distributed-metrics tests.

    Fix: every undefined ``__SCREAMING_SNAKE_CASE`` placeholder is resolved
    to the value the upstream accelerate test passes at that call site
    (initial ``Accelerator(False, False)``, then the loop variables). The
    ``test_mrpc``/``test_torch_metrics`` call sites kept their original names
    through obfuscation and are left as-is.
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    # For xla_spawn (TPUs)
    # NOTE(review): `main` is not defined in this file (the driver above was
    # obfuscated to `UpperCAmelCase_`), so spawning this raises NameError.
    # The ignored parameter is the process index xla_spawn passes.
    main()
if __name__ == "__main__":
    # Script entry point. NOTE(review): `main` is not defined in this file
    # (it was obfuscated to `UpperCAmelCase_`), so this raises NameError.
    main()
| 84 | 0 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Two parallel Transformer2D blocks whose residual outputs are mixed.

    Each transformer attends to its own slice of ``encoder_hidden_states``
    (text vs. image conditions, lengths in ``condition_lengths``); the two
    residuals are blended with ``mix_ratio`` and added back to the input.

    Fix: both methods declared one obfuscated name for *all* of their
    parameters — a SyntaxError (duplicate argument) — so the parameter names
    are restored from the keyword arguments forwarded to
    ``TransformeraDModel`` and from the reads in the forward body. The lost
    ``self.`` assignment targets are likewise restored from the attribute
    reads (``self.transformers``, ``self.mix_ratio``, ...).
    """

    def __init__(
        self,
        num_attention_heads = 16,
        attention_head_dim = 88,
        in_channels = None,
        num_layers = 1,
        dropout = 0.0,
        norm_num_groups = 32,
        cross_attention_dim = None,
        attention_bias = False,
        sample_size = None,
        num_vector_embeds = None,
        activation_fn = "geglu",
        num_embeds_ada_norm = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def lowercase__ (
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict = True,
    ):
        """Forward pass; returns a ``TransformeraDModelOutput`` (or a tuple
        when ``return_dict`` is False). ``attention_mask`` is accepted but
        not used yet."""
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            # Keep only the residual each transformer adds.
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states)
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A_ ( ProcessorMixin ):
    r"""Bundles an OwlViT image processor and a CLIP tokenizer.

    Fixes (review): the base class was the undefined name ``__lowerCamelCase``
    (restored to the ``ProcessorMixin`` imported above); the three class
    attributes all shared one duplicated name (restored to the names the
    ``ProcessorMixin`` machinery requires); every method declared one
    obfuscated name for all of its parameters — including
    ``*snake_case, **snake_case``, a SyntaxError — and six methods shared one
    name so only the last survived. Wrapper method names are restored from
    the delegated calls they forward to (``post_process``, ``decode``, ...).
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        # Fall back to the deprecated argument when no image processor given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize `text` and/or preprocess `images`/`query_images`, stacking
        per-sample query encodings into one batch of the requested tensor
        type ("np", "jax", "pt" or "tf")."""
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.')
        if text is not None:
            if isinstance(text, str) or (isinstance(text, list) and not isinstance(text[0], list)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, list) and isinstance(text[0], list):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)
            else:
                raise ValueError('Target return tensor type could not be returned')
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        # Forward to the image processor.
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        # Forward to the image processor.
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        # Forward to the image processor.
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        # Forward to the tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward to the tokenizer.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
# Hub id of the small Marian en->ro student model exercised by these tests.
# NOTE(review): the tests below read `MARIAN_MODEL`, which presumably was
# this constant's name before obfuscation.
__snake_case = """sshleifer/mar_enro_6_3_student"""
class UpperCAmelCase_ ( __lowerCamelCase ):
    """End-to-end GPU test: fine-tune a Marian en->ro student on a cached
    WMT subset via the repo's bash script, then check BLEU and checkpoints.

    NOTE(review): obfuscation damaged this class — the base is the undefined
    name `__lowerCamelCase` (presumably the `TestCasePlus` imported above),
    all three test methods share the name `UpperCAmelCase` (later defs shadow
    earlier ones, so only the last runs), the placeholder
    `SCREAMING_SNAKE_CASE_` is read before assignment in several calls
    (`extract_compressed_file=...` was presumably True; `patch.object(...)`
    presumably patched `sys.argv` with the built arg list), and `MARIAN_MODEL`
    refers to the renamed module constant above. Confirm against the upstream
    transformers seq2seq examples before relying on this code.
    """
    def UpperCAmelCase ( self ) -> Dict:
        # Download and unpack the 40k/0.5k/0.5k WMT en-ro subset once per test.
        super().setUp()
        UpperCamelCase :str = cached_path(
            '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=SCREAMING_SNAKE_CASE_ , )
        UpperCamelCase :List[Any] = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
    @slow
    @require_torch_gpu
    def UpperCAmelCase ( self ) -> int:
        # Smoke test: the student checkpoint is loadable.
        MarianMTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
    @slow
    @require_torch_gpu
    def UpperCAmelCase ( self ) -> int:
        # Substitutions applied to the checked-in fine-tuning bash script so
        # it runs a single short epoch against the cached dataset.
        UpperCamelCase :Optional[int] = {
            '''$MAX_LEN''': 64,
            '''$BS''': 64,
            '''$GAS''': 1,
            '''$ENRO_DIR''': self.data_dir,
            '''facebook/mbart-large-cc25''': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
            '''--num_train_epochs 6''': '''--num_train_epochs 1''',
        }
        # Clean up bash script
        UpperCamelCase :List[Any] = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
        UpperCamelCase :str = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        for k, v in env_vars_to_replace.items():
            UpperCamelCase :Tuple = bash_script.replace(SCREAMING_SNAKE_CASE_ , str(SCREAMING_SNAKE_CASE_ ) )
        UpperCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        UpperCamelCase :List[str] = F'''
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        '''.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        UpperCamelCase :Tuple = ['''finetune.py'''] + bash_script.split() + args
        with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase :Optional[int] = argparse.ArgumentParser()
            UpperCamelCase :List[str] = pl.Trainer.add_argparse_args(SCREAMING_SNAKE_CASE_ )
            UpperCamelCase :Tuple = SummarizationModule.add_model_specific_args(SCREAMING_SNAKE_CASE_ , os.getcwd() )
            UpperCamelCase :str = parser.parse_args()
            UpperCamelCase :Optional[int] = main(SCREAMING_SNAKE_CASE_ )
        # Check metrics
        UpperCamelCase :int = load_json(model.metrics_save_path )
        UpperCamelCase :Optional[int] = metrics['''val'''][0]
        UpperCamelCase :Any = metrics['''val'''][-1]
        self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , SCREAMING_SNAKE_CASE_ )
        self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
        # check lightning ckpt can be loaded and has a reasonable statedict
        UpperCamelCase :Optional[int] = os.listdir(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Any = [x for x in contents if x.endswith('''.ckpt''' )][0]
        UpperCamelCase :Optional[int] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Dict = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )
        UpperCamelCase :List[Any] = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            UpperCamelCase :Tuple = {os.path.basename(SCREAMING_SNAKE_CASE_ ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['''test'''] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    """End-to-end (slow, GPU) test that runs the no-teacher Marian distillation bash script
    and checks training metrics, the saved Lightning checkpoint, and prediction outputs."""

    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        # Placeholder -> value substitutions applied to the raw bash script.
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script: keep only the CLI arguments that follow `distillation.py`.
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

            # Check metrics
            metrics = load_json(model.metrics_save_path)
            first_step_stats = metrics["val"][0]
            last_step_stats = metrics["val"][-1]
            # +1 accounts for val_sanity_check
            assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)
            assert last_step_stats["val_avg_gen_time"] >= 0.01
            # test learning requirements: BLEU must improve over the course of training
            assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]
            assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
            assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

            # check lightning ckpt can be loaded and has a reasonable statedict
            contents = os.listdir(output_dir)
            ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
            full_path = os.path.join(args.output_dir, ckpt_path)
            ckpt = torch.load(full_path, map_location="cpu")
            expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

            # TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                contents = {os.path.basename(p) for p in contents}
                assert "test_generations.txt" in contents
                assert "test_results.txt" in contents
                # assert len(metrics["val"]) == desired_n_evals
                assert len(metrics["test"]) == 1
| 658 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

# Filenames expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Remote locations of the vocab/merges/config files for each supported checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input length (in tokens) for each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer, backed by a byte-level BPE from HuggingFace *tokenizers*."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences with BOS/EOS: `<s> A </s>` or `<s> A </s> </s> B </s>`."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return an all-zero token-type mask (BlenderbotSmall does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 84 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of transformer blocks for each published RWKV checkpoint size.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

# Hidden (embedding) dimension for each published RWKV checkpoint size.
HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    """Rename keys of a raw RWKV checkpoint state dict to the HF `transformers` naming scheme.

    The dict is modified in place (keys popped and re-inserted) and also returned.
    """
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        # Everything except the LM head lives under the `rwkv.` prefix.
        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint from the Hub, convert it to HF format, and save (or push) it.

    Args:
        repo_id: Hub repo to pull the raw checkpoint from.
        checkpoint_file: name of the checkpoint file inside that repo.
        output_dir: local directory where the converted model is written.
        size: model size key ("169M", ..., "14B"); inferred from the filename if None.
        tokenizer_file: optional tokenizer.json; defaults to the GPT-NeoX-20B tokenizer.
        push_to_hub: if True, also push the converted model under `model_name`.
        model_name: Hub name to push to (required when push_to_hub is True).

    Raises:
        ValueError: if the size cannot be inferred/validated or model_name is missing.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 375 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """Builds tiny OpenAI-GPT configs and random inputs for the common model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Use the last vocab id as padding token.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Create a tiny config plus random ids/labels shaped by this tester's dimensions."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        # Exercise the three supported call signatures; only the last result is checked.
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common ModelTester/pipeline-driven tests for the OpenAI-GPT model family."""

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # The double-heads model needs multiple-choice shaped inputs and labels.
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    """Integration test: greedy generation from the pretrained openai-gpt checkpoint."""

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 84 | 0 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Pairwise cosine similarity between two batches of embeddings.

    Each row of `emb_1` and `emb_2` is L2-normalized (norms clipped to `eps` to
    avoid division by zero), then the (n1, n2) similarity matrix is returned.
    """
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    """Flax module scoring CLIP image embeddings against learned NSFW concept embeddings."""

    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)

        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    """FlaxPreTrainedModel wrapper around `FlaxStableDiffusionSafetyCheckerModule`."""

    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape=None,
        seed=0,
        dtype=jnp.float32,
        _do_init=True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)  # NHWC, CLIP default resolution
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape, params=None):
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params=None):
        # Incoming images are NCHW; the Flax vision module expects NHWC.
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 183 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule name -> public names it exports.
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

# The modeling submodule requires torch; register it only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Remote config locations for the published EfficientNet checkpoints.
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    """Configuration for an EfficientNet model.

    Defaults reproduce the google/efficientnet-b7 architecture.
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels=3,
        image_size=600,
        width_coefficient=2.0,
        depth_coefficient=3.1,
        depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25,
        hidden_act="swish",
        hidden_dim=2560,
        pooling_type="mean",
        initializer_range=0.02,
        batch_norm_eps=0.001,
        batch_norm_momentum=0.99,
        dropout_rate=0.5,
        drop_connect_rate=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each MBConv block expands into 4 hidden layers.
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model outputs.
        return 1e-5
| 466 |
import math
def prime_sieve(n):
    """Return all primes strictly below `n` (n >= 3) via an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        # Mark odd multiples of i starting at 2*i as composite.
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit=999_966_663_333):
    """Project Euler 234: sum all semidivisible numbers not exceeding `limit`.

    A number n is semidivisible when exactly one of lps(n) (largest prime <= sqrt(n))
    and ups(n) (smallest prime >= sqrt(n)) divides n. For each pair of consecutive
    primes (p, q) the numbers in (p^2, q^2] are handled in bulk: add multiples of p,
    add multiples of q, then subtract multiples of p*q twice (added by both passes).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
    # Print the sum of all semidivisible numbers up to the default limit.
    print(solution())
| 84 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class snake_case__ ( __lowerCamelCase , unittest.TestCase):
a_ = ReformerTokenizer
a_ = ReformerTokenizerFast
a_ = True
a_ = False
a_ = True
def A ( self : Any ) -> int:
super().setUp()
UpperCAmelCase_ : Optional[Any] = ReformerTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : int ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = '''<s>'''
UpperCAmelCase_ : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def A ( self : int ) -> Union[str, Any]:
UpperCAmelCase_ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_A ) , 10_00 )
def A ( self : int ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def A ( self : Any ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : str = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer()
UpperCAmelCase_ : Tuple = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ : int = tokenizer.tokenize(_A )
UpperCAmelCase_ : List[str] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : int = tokenizer.encode(_A , add_special_tokens=_A )
UpperCAmelCase_ : Any = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer()
UpperCAmelCase_ : Optional[int] = tokenizer.encode(_A )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def A ( self : Dict , _A : Union[str, Any]=15 ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(_A , **_A )
# Simple input
UpperCAmelCase_ : Dict = '''This is a simple input'''
UpperCAmelCase_ : Dict = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : str = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : List[str] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
# Pair input
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
    def A(self) -> None:
        # Intentionally a no-op: this inherited test hook is skipped for this
        # tokenizer (the original body is just `pass`).
        pass
    def A(self) -> None:
        # Round-trip check: raw text -> subword tokens -> ids -> tokens.
        # NOTE(review): every `_A` below is an obfuscated placeholder — the
        # constructor originally received the sample vocab path and
        # keep_accents=True, and the assertions compared the locals computed
        # just above them. This method cannot run as-is; restore from the
        # unobfuscated source.
        UpperCAmelCase_ : Optional[int] = ReformerTokenizer(_A , keep_accents=_A )
        UpperCAmelCase_ : int = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_A ) , [2_85, 46, 10, 1_70, 3_82] , )
        # Accented/numeric input: pieces unknown to the vocab ('9', 'é')
        # round-trip to <unk> (id 0) below.
        UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            _A , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        UpperCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_ids(_A )
        self.assertListEqual(
            _A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        UpperCAmelCase_ : Optional[Any] = tokenizer.convert_ids_to_tokens(_A )
        self.assertListEqual(
            _A , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )
@cached_property
def A ( self : str ) -> Union[str, Any]:
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def A ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = '''Hello World!'''
UpperCAmelCase_ : Optional[int] = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def A ( self : List[Any] ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
UpperCAmelCase_ : Dict = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def A ( self : Optional[int] ) -> List[str]:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
UpperCAmelCase_ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ : List[Any] = ''' '''.join(_A )
UpperCAmelCase_ : Union[str, Any] = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' )
UpperCAmelCase_ : Optional[int] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
UpperCAmelCase_ : List[Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
UpperCAmelCase_ : Any = encoded_sequence['''input_ids'''].shape
UpperCAmelCase_ : Optional[Any] = ReformerModel(_A )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def A ( self : Tuple ) -> int:
# fmt: off
UpperCAmelCase_ : str = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
UpperCAmelCase_ : str = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_A , sequences=_A , )
| 541 |
import collections
import os
import re
from pathlib import Path
# Root of the transformers package whose __init__ files are checked.
PATH_TO_TRANSFORMERS = "src/transformers"

# Obfuscation had bound every one of these to the single name `UpperCAmelCase`,
# clobbering each other; the functions below reference them by these names.

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init file.

    Returns ``None`` when the line is not an ``if not is_xxx_available()``
    guard; otherwise the sorted backend names joined with ``_and_``.
    Renamed from the obfuscated ``UpperCAmelCase_``: ``parse_init`` calls it
    as ``find_backend``.
    """
    if _re_test_backend.search(line) is None:
        return None
    # _re_backend's trailing empty group makes findall return tuples; the
    # backend name is the first element.
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse its ``_import_structure`` and the
    ``TYPE_CHECKING`` section.

    Returns ``None`` for a traditional init without ``_import_structure``,
    otherwise a pair of ``{backend_or_none: [object names]}`` dicts — one for
    the import structure, one for the type-checking imports.  Renamed from the
    obfuscated ``UpperCAmelCase_`` (locals were all rebound to ``lowercase``);
    the call site in ``check_all_inits`` uses ``parse_init``.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init parsed by ``parse_init``.

    Returns a list of human-readable error strings: duplicate objects on
    either side, and objects present on one side but not the other.  Renamed
    from the obfuscated ``UpperCAmelCase_``; ``check_all_inits`` calls it as
    ``analyze_results``.
    """

    def find_duplicates(seq):
        # Objects listed more than once on one side.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the transformers package and verify every custom init is
    consistent between ``_import_structure`` and ``TYPE_CHECKING``.

    Raises ``ValueError`` listing all inconsistent inits.  Renamed from the
    obfuscated ``UpperCAmelCase_``; the ``__main__`` guard calls it by this
    name.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """List every submodule of transformers (dotted names relative to the
    package root).

    Private folders (leading underscore) and empty leftover folders are
    skipped; top-level single files are included without their ``.py``
    suffix.  Renamed from the obfuscated ``UpperCAmelCase_``;
    ``check_submodules`` calls it by this name.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules deliberately absent from `_import_structure`; referenced by
# check_submodules (the obfuscated source had bound this to `UpperCAmelCase`).
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Verify every submodule is registered in the main init's
    ``_import_structure`` (or deliberately ignored).

    Raises ``ValueError`` listing unregistered submodules.  Renamed from the
    obfuscated ``UpperCAmelCase_`` to the name its body and siblings imply.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
# Run the init-consistency check when executed as a script.
# NOTE(review): the companion `check_submodules()` call appears to have been
# lost from this guard — confirm against the unobfuscated source.
if __name__ == "__main__":
    check_all_inits()
check_submodules()
| 84 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _snake_case(x):  # picklable for multiprocessing
    """Sum an array-like; module-level so multiprocessing can pickle it.

    Parameter restored to ``x`` (the obfuscated signature took ``__lowercase``
    while the body used ``x``). NOTE(review): the next def rebinds
    ``_snake_case`` — the two helpers originally had distinct names.
    """
    return x.sum()
def _snake_case(i):  # picklable for multiprocessing
    """Return ``i + 1``; module-level so multiprocessing can pickle it.

    Parameter restored to ``i`` (the obfuscated signature took ``__lowercase``
    while the body used ``i``).
    """
    return i + 1
@dataclass
class A:
    """Tiny picklable record used by the `asdict` tests below.

    Restored from obfuscation (class was renamed to `_a` with both field
    annotations destroyed); the test at the bottom of this file constructs it
    as ``A(x=1, y='foobar')``.
    """

    x: int
    y: str
class _a ( __lowerCamelCase ):
    """Tests for ``map_nested``, ``zip_dict`` and ``temporary_assignment``.

    NOTE(review): obfuscation destroyed this class — the base
    ``__lowerCamelCase`` (presumably ``TestCase``), every ``_UpperCAmelCase``
    call argument, and all locals (rebound to the one name
    ``UpperCamelCase_``). It cannot run as-is; the comments below describe the
    apparent intent only.
    """

    def _UpperCAmelCase ( self ) -> Any:
        # map_nested over {}, [], scalars, lists, dicts and nested dicts,
        # first sequentially, then with num_proc=2, then over numpy arrays.
        UpperCamelCase_ = {}
        UpperCamelCase_ = []
        UpperCamelCase_ = 1
        UpperCamelCase_ = [1, 2]
        UpperCamelCase_ = {'a': 1, 'b': 2}
        UpperCamelCase_ = {'a': [1, 2], 'b': [3, 4]}
        UpperCamelCase_ = {'a': {'1': 1}, 'b': 2}
        UpperCamelCase_ = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
        # Expected results (each input mapped element-wise).
        UpperCamelCase_ = {}
        UpperCamelCase_ = []
        UpperCamelCase_ = 2
        UpperCamelCase_ = [2, 3]
        UpperCamelCase_ = {'a': 2, 'b': 3}
        UpperCamelCase_ = {'a': [2, 3], 'b': [4, 5]}
        UpperCamelCase_ = {'a': {'1': 2}, 'b': 3}
        UpperCamelCase_ = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
        # Same checks with multiprocessing enabled.
        UpperCamelCase_ = 2
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
        # numpy inputs, with and without map_numpy / num_proc.
        UpperCamelCase_ = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
        UpperCamelCase_ = {'a': 2, 'b': 0, 'c': 2}
        UpperCamelCase_ = {
            'a': np.eye(2 ).astype(_UpperCAmelCase ),
            'b': np.zeros(3 ).astype(_UpperCAmelCase ),
            'c': np.ones(2 ).astype(_UpperCAmelCase ),
        }
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , map_numpy=_UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(_UpperCAmelCase , _UpperCAmelCase , map_numpy=_UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , map_numpy=_UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(_UpperCAmelCase , _UpperCAmelCase , map_numpy=_UpperCAmelCase , num_proc=_UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(_UpperCAmelCase ):  # can't pickle a local lambda
            map_nested(lambda _UpperCAmelCase : x + 1 , _UpperCAmelCase , num_proc=_UpperCAmelCase )

    def _UpperCAmelCase ( self ) -> str:
        # zip_dict pairs values of the same key across several dicts.
        UpperCamelCase_ = {'a': 1, 'b': 2}
        UpperCamelCase_ = {'a': 3, 'b': 4}
        UpperCamelCase_ = {'a': 5, 'b': 6}
        UpperCamelCase_ = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) ) , _UpperCAmelCase )

    def _UpperCAmelCase ( self ) -> Optional[Any]:
        # temporary_assignment must set the attribute inside the context and
        # restore it on exit.
        class _a :
            """simple docstring"""

            A_ = """bar"""

        UpperCamelCase_ = Foo()
        self.assertEqual(foo.my_attr , 'bar' )
        with temporary_assignment(_UpperCAmelCase , 'my_attr' , 'BAR' ):
            self.assertEqual(foo.my_attr , 'BAR' )
        self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def _snake_case(iterable_length, num_proc, expected_num_proc):
    """map_nested must only go parallel when the input reaches
    ``parallel_min_length`` AND more than one process is requested.

    Restored from obfuscation: the original signature repeated the parameter
    name ``__lowercase`` three times (a SyntaxError); the names are recovered
    from the parametrize ids and the body.
    """
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a(TestCase):
    """``temp_seed`` must make framework RNG reproducible inside its scope.

    Base class restored to ``TestCase`` (imported at the top of this file;
    obfuscation had replaced it with the undefined ``__lowerCamelCase``).
    NOTE(review): obfuscation also collapsed three distinct test names onto
    ``_UpperCAmelCase``, so the later defs shadow the earlier ones — restore
    unique names to run all three.
    """

    @require_tf
    def _UpperCAmelCase(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        # Same seed twice -> identical outputs; no seed -> a different one.
        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def _UpperCAmelCase(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def _UpperCAmelCase(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def _snake_case(input_data):
    """NestedDataStructure must expose the wrapped value unchanged via .data.

    Parameter restored to ``input_data`` to match the parametrize id (the
    obfuscated body compared against it while the signature took
    ``__lowercase``).
    """
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def _snake_case(data, expected_output):
    """flatten() must yield all leaf values of arbitrarily nested lists/dicts.

    Parameters restored to match the parametrize ids (the obfuscated signature
    bound ``__lowercase`` twice).
    """
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def _snake_case():
    """asdict must recursively convert dataclasses nested in dicts/lists, and
    reject a plain (non-dataclass) top-level input."""
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    # asdict on a list (not a dataclass) must raise.
    # NOTE(review): the exception type was obfuscated away; TypeError matches
    # the documented asdict contract — confirm against the original.
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text):
    """Split a string on whitespace; module-level so Pool workers can pickle
    it.

    Renamed from the obfuscated ``_snake_case``: the iflatmap test below
    passes it as ``_split_text``.
    """
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content):
    """Yield ``content`` twice, two seconds apart, with a timestamp per yield.

    Used to check that iflatmap_unordered surfaces items as soon as they are
    produced.  Renamed from the obfuscated ``_snake_case``: the iflatmap test
    below passes it by this name.
    """
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def _snake_case():
    """iflatmap_unordered must flatten worker results from both Pool
    implementations and yield each item as soon as it is produced."""
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 23 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    """One link of the stack's internal singly linked list."""

    def __init__(self, data: T):
        self.data = data
        # Next node towards the bottom of the stack (None at the bottom).
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class A_(Generic[T]):
    """LIFO stack backed by a singly linked list.

    Restored from obfuscation: ``T`` and ``Node`` were bound to throwaway
    names, both classes shared the name ``A_``, and all five public methods
    shared one name (so only the last survived); they are given descriptive
    names with identical bodies.
    """

    def __init__(self):
        # Top of the stack; None when the stack is empty.
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        # Walk from the top of the stack to the bottom.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        # e.g. "3->2->1" from top to bottom (original used the undefined
        # name `snake_case` here instead of `item`).
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        """Place a new item on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top item; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the top item without removing it; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every item (the old chain is garbage-collected)."""
        self.top = None
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 84 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (the obfuscated source bound both this and the map below to
# the single name `UpperCAmelCase`, clobbering the logger).
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL map for RoCBert.
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class lowerCamelCase__(PretrainedConfig):
    """Configuration class for RoCBert models.

    Restored from obfuscation: the base class is ``PretrainedConfig``
    (imported at the top of this file), the ``model_type`` attribute follows
    the standard config convention, and the ``__init__`` parameter names —
    which had all been collapsed onto the duplicate name ``lowerCamelCase_``
    (invalid Python) — are recovered from the attribute assignments in the
    body, paired positionally with the original default values.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 617 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
lowercase = LlamaModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = True
lowercase = LlamaModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , )
lowercase = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = True
lowercase = True
lowercase = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , )
lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
# select random slice
lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) = config_and_inputs
lowercase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model / pipeline / generation tests for the Llama architecture.

    NOTE(review): the generated class was named ``A_`` (shadowed by a later
    class of the same name, so unittest would only discover one of them) with
    undefined bases; class name and upstream mixin bases restored.
    GenerationTesterMixin / parameterized / set_seed / torch_device are
    expected from the file's import header — TODO confirm they are imported.
    """
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp( self ):
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the prepared tuple
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_llama_sequence_classification_model( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_llama_sequence_classification_model_for_single_label( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_llama_sequence_classification_model_for_multi_label( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
    def test_save_load_fast_init_from_base( self ):
        pass

    @parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling( self , scaling_type ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
@require_torch
class LlamaIntegrationTest( unittest.TestCase ):
    """Slow integration tests against released Llama-2 checkpoints (all skipped).

    NOTE(review): restored real local names (the generated code assigned every
    value to ``lowercase`` and then read ``model``/``out``/... which are
    undefined) and a distinct class name — the original ``A_`` shadowed the
    model-test class above.
    """
    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def test_model_7b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
        out = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def test_model_13b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def test_model_13bf_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
        # fmt: on
        # BUGFIX: the generated code compared out.mean(-1) against the 30-element
        # slice tensor; the slice of the logits is the intended comparand.
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )

    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
    @slow
    def test_model_70b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        EXPECTED_MEAN = torch.tensor(
            [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Model is curently gated' )
    @slow
    def test_model_13b_greedy_generation( self ):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        input_ids = tokenizer.encode(prompt , return_tensors='pt' )
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids , max_new_tokens=64 , top_p=None , temperature=1 , do_sample=False )
        text = tokenizer.decode(generated_ids[0] , skip_special_tokens=True )
        self.assertEqual(EXPECTED_TEXT_COMPLETION , text )
| 84 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor( ProcessorMixin ):
    """Bundle a CLIP image processor and a CLIP tokenizer into one processor.

    NOTE(review): restored the upstream class name and ``ProcessorMixin`` base
    (the generated base ``__lowerCamelCase`` is undefined) and deduplicated the
    ``lowerCamelCase__`` parameter names, which were a SyntaxError.
    """
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # `feature_extractor` is the legacy name for `image_processor`
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns the tokenizer encoding (with `pixel_values` attached when
        images are given), or a BatchEncoding of image features only.
        """
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # BUGFIX: the generated code computed pixel_values but never
            # attached them to the returned encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        # union of tokenizer and image-processor input names, order-preserving
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
| 325 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)  # module-level logger (identifier mangled by the code generator)
class MockDownloadManager:
    """Download-manager stand-in that serves local/remote dummy data for tests.

    NOTE(review): restored the upstream class name (the generated ``A_``
    collided with two earlier classes in this file), real attribute/parameter
    names, and deduplicated the ``snake_case`` parameters in ``__init__``,
    which were a SyntaxError.
    """
    # name of the directory inside dummy_data.zip holding the dummy files
    dummy_file_name = """dummy_data"""
    # root directory of the dataset scripts checkout
    datasets_scripts_dir = """datasets"""
    is_streaming = False

    def __init__( self , dataset_name , config , version , cache_dir = None , use_local_dummy_data = False , load_existing_dummy_data = True , download_callbacks = None , ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file( self ):
        # lazily resolved path to the extracted dummy data
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder( self ):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('dummy' , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join('dummy' , self.version_name )

    @property
    def dummy_zip_file( self ):
        return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )

    def download_dummy_data( self ):
        """Fetch (and extract) the dummy-data zip, locally or from GitHub."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )

    @property
    def local_path_to_dummy_data( self ):
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def github_path_to_dummy_data( self ):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
        return self._bucket_url

    @property
    def manual_dir( self ):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )

    def download_and_extract( self , data_url , *args ):
        """Map real URLs (str / list / dict) to paths inside the dummy data."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict ):
            return self.create_dummy_data_dict(dummy_file , data_url )
        elif isinstance(data_url , (list, tuple) ):
            return self.create_dummy_data_list(dummy_file , data_url )
        else:
            return self.create_dummy_data_single(dummy_file , data_url )

    def download( self , data_url , *args ):
        return self.download_and_extract(data_url )

    def download_custom( self , data_url , custom_download ):
        return self.download_and_extract(data_url )

    def extract( self , path , *args , **kwargs ):
        # dummy data is already extracted
        return path

    def get_recorded_sizes_checksums( self ):
        return {}

    def create_dummy_data_dict( self , path_to_dummy_data , data_url ):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list( self , path_to_dummy_data , data_url ):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list

    def create_dummy_data_single( self , path_to_dummy_data , data_url ):
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files( self ):
        pass

    def manage_extracted_files( self ):
        pass

    def iter_archive( self , path ):
        """Yield (relative_posix_path, file_obj) pairs for files under `path`."""
        def _iter_archive_members(path ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
                for member in members:
                    if member.startswith(relative_path.as_posix() ):
                        yield dummy_parent_path.joinpath(member )
        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob('*' )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
                yield file_path.relative_to(path ).as_posix(), file_path.open('rb' )

    def iter_files( self , paths ):
        """Yield file paths, skipping hidden/dunder-prefixed files and dirs."""
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith(('.', '__') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith(('.', '__') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith(('.', '__') ):
                            continue
                        yield os.path.join(dirpath , filename )
| 84 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object ):
    """Builds tiny DistilBERT configs/inputs and shape-checks each head.

    NOTE(review): restored the upstream class name — the generated
    ``_lowerCAmelCase`` was reused by two later classes (shadowing) while the
    test class below instantiates ``DistilBertModelTester`` — and deduplicated
    the ``_lowerCamelCase`` parameters, which were a SyntaxError.
    """
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        """Return a config plus random ids/mask/label tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        return DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DistilBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DistilBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DistilBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        # repeat ids/mask once per choice: (batch, num_choices, seq_len)
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model/pipeline tests for DistilBERT.

    NOTE(review): restored real names — the generated class shared its name
    with its neighbours, every attribute was named ``a`` (each assignment
    overwriting the last), and method locals were assigned to ``UpperCamelCase_``
    but read back under their real names.
    """
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": DistilBertModel,
            """fill-mask""": DistilBertForMaskedLM,
            """question-answering""": DistilBertForQuestionAnswering,
            """text-classification""": DistilBertForSequenceClassification,
            """token-classification""": DistilBertForTokenClassification,
            """zero-shot""": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp( self ):
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @slow
    @require_torch_gpu
    def test_torchscript_device_change( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , 'traced_model.pt' ) )
                loaded = torch.jit.load(os.path.join(tmp , 'traced_model.pt' ) , map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ) , inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class _lowerCAmelCase(unittest.TestCase):
    """Integration test: run pretrained DistilBERT and compare reference activations.

    Fixes: the method was named ``_a`` (never collected by unittest), every
    tensor was bound to a throwaway local while the undefined name
    ``_lowerCamelCase`` was passed around, and a stray ``| 57 |`` artifact
    made the last line a syntax error.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        # Loose tolerance: exact values drift slightly across hardware/BLAS.
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the OpenAI GPT (GPT-1) slow and fast BPE tokenizers.

    Fixes relative to the corrupted original: the base class was the undefined
    name ``__lowerCamelCase`` (TokenizerTesterMixin is imported above); the four
    class attributes all rebound one name ``_UpperCamelCase`` (the mixin reads
    ``tokenizer_class``/``rust_tokenizer_class``/etc.); every local was bound to
    the single name ``lowercase`` and then read back via undefined names; and
    ``assertRaises`` was handed the integer max-length instead of ``ValueError``.
    """

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>',
            'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        # Mixin hook: (raw input, expected decoded output) pair.
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # This tokenizer has no pad token, so padding must raise ValueError.
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length',
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length',
                )

    def test_padding_different_model_input_name(self):
        # Tokenizer has no padding token; the generic mixin test does not apply.
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(A_):
    """Re-run the full tokenizer suite with ftfy + spacy installed, exercising
    the alternative (non-BasicTokenizer) text-cleaning path.

    Fixes: the original redefined ``A_`` (shadowing the main suite so its tests
    never ran), inherited from the undefined name ``__lowerCamelCase``, and was
    followed by a stray ``| 84 | 0 |`` artifact line.
    """

    pass
"""simple docstring"""
import torch
from torch import nn
class snake_case ( nn.Module ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = n_token
SCREAMING_SNAKE_CASE_ = d_embed
SCREAMING_SNAKE_CASE_ = d_proj
SCREAMING_SNAKE_CASE_ = cutoffs + [n_token]
SCREAMING_SNAKE_CASE_ = [0] + self.cutoffs
SCREAMING_SNAKE_CASE_ = div_val
SCREAMING_SNAKE_CASE_ = self.cutoffs[0]
SCREAMING_SNAKE_CASE_ = len(self.cutoffs ) - 1
SCREAMING_SNAKE_CASE_ = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.zeros(self.n_clusters ) )
SCREAMING_SNAKE_CASE_ = nn.ModuleList()
SCREAMING_SNAKE_CASE_ = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
else:
self.out_projs.append(SCREAMING_SNAKE_CASE_ )
self.out_layers.append(nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
else:
for i in range(len(self.cutoffs ) ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE_ = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
self.out_layers.append(nn.Linear(SCREAMING_SNAKE_CASE_ , r_idx - l_idx ) )
SCREAMING_SNAKE_CASE_ = keep_order
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if proj is None:
SCREAMING_SNAKE_CASE_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
SCREAMING_SNAKE_CASE_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , proj.t().contiguous() )
SCREAMING_SNAKE_CASE_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
SCREAMING_SNAKE_CASE_ = hidden[..., :-1, :].contiguous()
SCREAMING_SNAKE_CASE_ = labels[..., 1:].contiguous()
SCREAMING_SNAKE_CASE_ = hidden.view(-1 , hidden.size(-1 ) )
SCREAMING_SNAKE_CASE_ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
SCREAMING_SNAKE_CASE_ = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
SCREAMING_SNAKE_CASE_ = labels != -1_00
SCREAMING_SNAKE_CASE_ = torch.zeros_like(SCREAMING_SNAKE_CASE_ , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE_ = (
-nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE_ = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE_ = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE_ = self.out_layers[i].weight
SCREAMING_SNAKE_CASE_ = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE_ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(SCREAMING_SNAKE_CASE_ )
biases.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 )
if labels is None:
SCREAMING_SNAKE_CASE_ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
SCREAMING_SNAKE_CASE_ = torch.zeros_like(SCREAMING_SNAKE_CASE_ , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = [0] + self.cutoffs
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
SCREAMING_SNAKE_CASE_ = (labels >= l_idx) & (labels < r_idx)
SCREAMING_SNAKE_CASE_ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
SCREAMING_SNAKE_CASE_ = labels.index_select(0 , SCREAMING_SNAKE_CASE_ ) - l_idx
SCREAMING_SNAKE_CASE_ = head_logprob.index_select(0 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = hidden.index_select(0 , SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = hidden
if i == 0:
if labels is not None:
SCREAMING_SNAKE_CASE_ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE_ = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 )
SCREAMING_SNAKE_CASE_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
SCREAMING_SNAKE_CASE_ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE_ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
SCREAMING_SNAKE_CASE_ = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , SCREAMING_SNAKE_CASE_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE_ = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE_ = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE_ = self.out_layers[i].weight
SCREAMING_SNAKE_CASE_ = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE_ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(SCREAMING_SNAKE_CASE_ )
biases.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 )
SCREAMING_SNAKE_CASE_ = [0] + self.cutoffs
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
SCREAMING_SNAKE_CASE_ = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 )
SCREAMING_SNAKE_CASE_ = head_logprob[:, -i] + tail_logprob_i
SCREAMING_SNAKE_CASE_ = logprob_i
return out | 626 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Fix: the path was assigned to `UpperCAmelCase` but consumed as
# `git_repo_path` on the next line, raising NameError at import time.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
    """Register transformers' custom pytest markers.

    Fix: pytest only discovers hooks by their exact ``pytest_*`` names; the
    original was named ``UpperCAmelCase_`` (shared with three other functions
    below, each shadowing the last), so no hook ever ran.
    """
    config.addinivalue_line(
        'markers', 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested')
    config.addinivalue_line(
        'markers', 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested')
    config.addinivalue_line('markers', 'is_pipeline_test: mark test to run only when pipelines are tested')
    config.addinivalue_line('markers', 'is_staging_test: mark test to run only in the staging environment')
    config.addinivalue_line('markers', 'accelerate_tests: mark test that require accelerate')
    config.addinivalue_line('markers', 'tool_tests: mark the tool tests that are run on their specific schedule')
def pytest_addoption(parser):
    """Forward pytest's option parser to transformers' shared option registration.

    Fix: restored the ``pytest_addoption`` hook name (the original shared the
    name ``UpperCAmelCase_`` with its siblings and was shadowed).
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Emit the extra test reports when ``--make-reports`` was passed.

    Fixes: restored the ``pytest_terminal_summary`` hook name, and the option
    value was bound to ``lowercase`` but tested/forwarded as the undefined
    name ``make_reports``.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
lowercase = 0
# Doctest custom flag to ignore output.
# Fix: both values were assigned to the single name `UpperCAmelCase` (the
# second clobbering the first), while the checker class below reads
# `IGNORE_RESULT` and `OutputChecker`.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker
class A_(doctest.OutputChecker):
    """Doctest output checker that honors the custom IGNORE_RESULT option flag.

    Fixes: the base class was the undefined name ``__lowerCamelCase``, and the
    method was named ``SCREAMING_SNAKE_CASE__`` — doctest dispatches to
    ``check_output``, so the override must carry that exact name.
    """

    # register_optionflag has setdefault semantics: returns the existing flag
    # if 'IGNORE_RESULT' was already registered at module level.
    _IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

    def check_output(self, want, got, optionflags):
        # When the flag is set, accept the example regardless of its output.
        if self._IGNORE_RESULT & optionflags:
            return True
        return doctest.OutputChecker.check_output(self, want, got, optionflags)
# Install the custom doctest machinery.
# Fixes: the original assigned all three objects to the single module name
# `UpperCAmelCase` (each clobbering the last, patching nothing), referenced
# the undefined name `CustomOutputChecker`, and was followed by a stray
# `| 84 | 0 |` artifact line.
doctest.OutputChecker = A_
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """Builds tiny OpenAI-GPT configs/inputs and shape-checks each model head.

    Fixes relative to the corrupted original: the class was named
    ``SCREAMING_SNAKE_CASE__`` although the test class below instantiates
    ``OpenAIGPTModelTester`` by name; ``__init__`` bound every constructor
    argument to a throwaway local (so no ``self.*`` attribute read later ever
    existed); and the ``create_and_check_*`` method names are restored to the
    names the test class below actually calls.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # GPT-1 has no dedicated pad token; reuse the last vocab id.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        # Run with progressively fewer optional inputs; only the last result is checked.
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/generation/pipeline test-suite wiring for OpenAI GPT.

    Fixes relative to the corrupted original: the mixin bases were the
    undefined name ``__lowerCamelCase`` (the mixins are imported at the top of
    this file); the three class attributes all rebound one name ``a`` (the
    mixins read ``all_model_classes`` etc.); every method was named
    ``lowercase__`` so each shadowed the previous and none were discovered;
    and ``setUp`` bound the testers to throwaway locals instead of ``self``.
    """

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                # Double-heads inputs carry an extra num_choices dimension.
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    """Integration test: greedy generation from the pretrained openai-gpt checkpoint.

    Fixes: the class redefined ``SCREAMING_SNAKE_CASE__`` (shadowing the test
    class above); the method was named ``lowercase__``; ``model.to`` /
    ``torch.tensor(device=...)`` / ``do_sample`` received the undefined name
    ``snake_case__`` (greedy decoding requires ``do_sample=False``); and a
    stray ``| 645 |`` artifact line followed the class.
    """

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4_735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40_477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
import torch
from torch import nn
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case=1 , snake_case=False ):
super().__init__()
lowercase = n_token
lowercase = d_embed
lowercase = d_proj
lowercase = cutoffs + [n_token]
lowercase = [0] + self.cutoffs
lowercase = div_val
lowercase = self.cutoffs[0]
lowercase = len(self.cutoffs ) - 1
lowercase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowercase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowercase = nn.Parameter(torch.zeros(self.n_clusters ) )
lowercase = nn.ModuleList()
lowercase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
else:
self.out_projs.append(snake_case )
self.out_layers.append(nn.Linear(snake_case , snake_case ) )
else:
for i in range(len(self.cutoffs ) ):
lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
self.out_layers.append(nn.Linear(snake_case , r_idx - l_idx ) )
lowercase = keep_order
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
if proj is None:
lowercase = nn.functional.linear(snake_case , snake_case , bias=snake_case )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowercase = nn.functional.linear(snake_case , proj.t().contiguous() )
lowercase = nn.functional.linear(snake_case , snake_case , bias=snake_case )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None , snake_case=False ):
if labels is not None:
# Shift so that tokens < n predict n
lowercase = hidden[..., :-1, :].contiguous()
lowercase = labels[..., 1:].contiguous()
lowercase = hidden.view(-1 , hidden.size(-1 ) )
lowercase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowercase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowercase = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowercase = labels != -100
lowercase = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
lowercase = (
-nn.functional.log_softmax(snake_case , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowercase = nn.functional.log_softmax(snake_case , dim=-1 )
else:
# construct weights and biases
lowercase , lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase = self.out_layers[0].weight[l_idx:r_idx]
lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase = self.out_layers[i].weight
lowercase = self.out_layers[i].bias
if i == 0:
lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(snake_case )
biases.append(snake_case )
lowercase , lowercase , lowercase = weights[0], biases[0], self.out_projs[0]
lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
lowercase = nn.functional.log_softmax(snake_case , dim=1 )
if labels is None:
lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowercase = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
lowercase = 0
lowercase = [0] + self.cutoffs
for i in range(len(snake_case ) - 1 ):
lowercase , lowercase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowercase = (labels >= l_idx) & (labels < r_idx)
lowercase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowercase = labels.index_select(0 , snake_case ) - l_idx
lowercase = head_logprob.index_select(0 , snake_case )
lowercase = hidden.index_select(0 , snake_case )
else:
lowercase = hidden
if i == 0:
if labels is not None:
lowercase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowercase = head_logprob[:, : self.cutoffs[0]]
else:
lowercase , lowercase , lowercase = weights[i], biases[i], self.out_projs[i]
lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
lowercase = nn.functional.log_softmax(snake_case , dim=1 )
lowercase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowercase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowercase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowercase = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , snake_case , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        """Log-probability pass of an adaptive softmax over clustered vocabularies.

        NOTE(review): identifiers in this file look machine-mangled — every local
        is assigned to the single name ``lowercase``, so later reads of names such
        as ``head_logit``, ``head_logprob``, ``weights``, ``cutoff_values`` or
        ``out`` are unresolved as written. The comments below describe the
        apparent intent (a Transformer-XL-style adaptive log-softmax); confirm
        against the original source before relying on them.
        """
        if self.n_clusters == 0:
            # Single-cluster case: one projected logit over the full vocabulary.
            lowercase = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(snake_case , dim=-1 )
        else:
            # construct weights and biases
            lowercase , lowercase = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    # div_val == 1: all clusters share the first output layer, sliced per cutoff.
                    lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    lowercase = self.out_layers[0].weight[l_idx:r_idx]
                    lowercase = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    lowercase = self.out_layers[i].weight
                    lowercase = self.out_layers[i].bias
                if i == 0:
                    # The head cluster is extended with the cluster weight/bias rows.
                    lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(snake_case )
                biases.append(snake_case )
            lowercase , lowercase , lowercase = weights[0], biases[0], self.out_projs[0]
            lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
            lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            lowercase = nn.functional.log_softmax(snake_case , dim=1 )
            lowercase = [0] + self.cutoffs
            for i in range(len(snake_case ) - 1 ):
                # Fill the output slice belonging to cluster i.
                lowercase , lowercase = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    lowercase = head_logprob[:, : self.cutoffs[0]]
                else:
                    lowercase , lowercase , lowercase = weights[i], biases[i], self.out_projs[i]
                    lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
                    lowercase = nn.functional.log_softmax(snake_case , dim=1 )
                    # Tail log-prob = head cluster-token log-prob + within-cluster log-prob.
                    lowercase = head_logprob[:, -i] + tail_logprob_i
                lowercase = logprob_i
            return out
| 84 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
# NOTE(review): every module-level constant below is assigned to the same
# mangled name ``__snake_case``, so each assignment overwrites the previous
# one; in the original file these were presumably distinct names (the logger,
# the config/checkpoint docstring constants, the archive list). Verify against
# the original source.
__snake_case = logging.get_logger(__name__)
# General docstring
__snake_case = """RegNetConfig"""
# Base docstring
__snake_case = """facebook/regnet-y-040"""
__snake_case = [1, 10_88, 7, 7]
# Image classification docstring
__snake_case = """facebook/regnet-y-040"""
__snake_case = """tabby, tabby cat"""
__snake_case = [
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase_ ( nn.Module ):
    """Convolution block: conv -> batch-norm -> activation (RegNet-style).

    NOTE(review): identifiers are machine-mangled — all ``__init__`` parameters
    share the name ``SCREAMING_SNAKE_CASE_`` (a SyntaxError as written), the
    targets of the ``UpperCamelCase`` assignments are lost, and
    ``nn.Convad``/``nn.BatchNormad``/``ACTaFN`` are unresolved names
    (presumably ``nn.Conv2d``/``nn.BatchNorm2d``/``ACT2FN``). Confirm against
    the original source before relying on any of this.
    """
    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = "relu" , ) -> Union[str, Any]:
        super().__init__()
        # "same" padding for odd kernel sizes via kernel_size // 2.
        UpperCamelCase :Optional[int] = nn.Convad(
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , padding=kernel_size // 2 , groups=SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ , )
        UpperCamelCase :int = nn.BatchNormad(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = ACTaFN[activation] if activation is not None else nn.Identity()
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
        # conv -> norm -> activation; reads self.convolution/normalization/activation,
        # which the mangled __init__ above never actually assigns.
        UpperCamelCase :int = self.convolution(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Tuple = self.normalization(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = self.activation(SCREAMING_SNAKE_CASE_ )
        return hidden_state
class UpperCAmelCase_ ( nn.Module ):
    """Input "stem": a single strided conv embedding of the pixel values.

    NOTE(review): identifiers are machine-mangled (the ``UpperCamelCase``
    assignment targets are lost; ``config``/``pixel_values``/``hidden_state``
    are unresolved as written). Confirm against the original source.
    """
    def __init__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
        super().__init__()
        UpperCamelCase :Any = RegNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
        UpperCamelCase :Dict = config.num_channels
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> str:
        # Guard: channel count of the input must match the configured num_channels.
        UpperCamelCase :int = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
        UpperCamelCase :List[str] = self.embedder(SCREAMING_SNAKE_CASE_ )
        return hidden_state
class UpperCAmelCase_ ( nn.Module ):
    """Shortcut branch: 1x1 strided conv + batch-norm (no activation), used to
    match channels/stride on a residual path.

    NOTE(review): identifiers are machine-mangled (duplicate parameter names —
    a SyntaxError as written; ``nn.Convad``/``nn.BatchNormad`` unresolved).
    Confirm against the original source.
    """
    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2 ) -> int:
        super().__init__()
        UpperCamelCase :Optional[int] = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , stride=SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Any = nn.BatchNormad(SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
        UpperCamelCase :int = self.convolution(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = self.normalization(SCREAMING_SNAKE_CASE_ )
        return hidden_state
class UpperCAmelCase_ ( nn.Module ):
    """Squeeze-and-Excitation layer: global average pool -> two 1x1 convs with
    ReLU/sigmoid -> channel-wise rescaling of the input feature map.

    NOTE(review): identifiers are machine-mangled (duplicate parameter names —
    a SyntaxError as written). Confirm against the original source.
    """
    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
        super().__init__()
        UpperCamelCase :List[str] = nn.AdaptiveAvgPoolad((1, 1) )
        UpperCamelCase :str = nn.Sequential(
            nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 ) , nn.Sigmoid() , )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Any:
        # b c h w -> b c 1 1
        UpperCamelCase :int = self.pooler(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = self.attention(SCREAMING_SNAKE_CASE_ )
        # Gate the input channel-wise by the learned attention weights.
        UpperCamelCase :List[str] = hidden_state * attention
        return hidden_state
class UpperCAmelCase_ ( nn.Module ):
    """Residual "X" layer: 1x1 reduce -> 3x3 grouped conv -> 1x1 expand, with a
    strided shortcut projection when the shape changes.

    NOTE(review): identifiers are machine-mangled (duplicate parameter names —
    a SyntaxError as written; ``UpperCamelCase`` assignment targets lost).
    Confirm against the original source.
    """
    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1 ) -> Dict:
        super().__init__()
        # Shortcut must project when channels or stride change.
        UpperCamelCase :Optional[Any] = in_channels != out_channels or stride != 1
        # Group count for the grouped 3x3 conv (at least one group).
        UpperCamelCase :Optional[int] = max(1 , out_channels // config.groups_width )
        UpperCamelCase :int = (
            RegNetShortCut(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ ) if should_apply_shortcut else nn.Identity()
        )
        UpperCamelCase :Union[str, Any] = nn.Sequential(
            RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ ) , )
        UpperCamelCase :Dict = ACTaFN[config.hidden_act]
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
        UpperCamelCase :Union[str, Any] = hidden_state
        UpperCamelCase :Optional[int] = self.layer(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[Any] = self.shortcut(SCREAMING_SNAKE_CASE_ )
        # Residual addition, then final activation.
        hidden_state += residual
        UpperCamelCase :Optional[int] = self.activation(SCREAMING_SNAKE_CASE_ )
        return hidden_state
class UpperCAmelCase_ ( nn.Module ):
    """Residual "Y" layer: like the X layer, but with a Squeeze-and-Excitation
    block inserted before the final 1x1 projection.

    NOTE(review): identifiers are machine-mangled (duplicate parameter names —
    a SyntaxError as written; ``UpperCamelCase`` assignment targets lost).
    Confirm against the original source.
    """
    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1 ) -> Tuple:
        super().__init__()
        UpperCamelCase :Optional[Any] = in_channels != out_channels or stride != 1
        UpperCamelCase :List[str] = max(1 , out_channels // config.groups_width )
        UpperCamelCase :int = (
            RegNetShortCut(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ ) if should_apply_shortcut else nn.Identity()
        )
        # The SE block squeezes to round(in_channels / 4) channels between the
        # grouped conv and the final 1x1 projection.
        UpperCamelCase :List[Any] = nn.Sequential(
            RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act ) , RegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ ) , )
        UpperCamelCase :List[Any] = ACTaFN[config.hidden_act]
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
        UpperCamelCase :Optional[int] = hidden_state
        UpperCamelCase :int = self.layer(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Union[str, Any] = self.shortcut(SCREAMING_SNAKE_CASE_ )
        hidden_state += residual
        UpperCamelCase :List[str] = self.activation(SCREAMING_SNAKE_CASE_ )
        return hidden_state
class UpperCAmelCase_ ( nn.Module ):
    """A stage: ``depth`` X- or Y-layers, where the first layer downsamples via
    its stride.

    NOTE(review): identifiers are machine-mangled (duplicate parameter names —
    a SyntaxError as written; ``config``/``layer``/``depth`` unresolved).
    Confirm against the original source.
    """
    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , ) -> Dict:
        super().__init__()
        # Layer flavour comes from the config: "x" -> RegNetXLayer, else RegNetYLayer.
        UpperCamelCase :List[str] = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
        UpperCamelCase :List[str] = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , ) , *[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(depth - 1 )] , )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> str:
        UpperCamelCase :List[Any] = self.layers(SCREAMING_SNAKE_CASE_ )
        return hidden_state
class UpperCAmelCase_ ( nn.Module ):
    """Encoder: a stack of stages; optionally collects per-stage hidden states.

    NOTE(review): identifiers are machine-mangled (``UpperCamelCase``
    assignment targets lost; ``config``/``hidden_states``/``return_dict``
    unresolved as written). Confirm against the original source.
    """
    def __init__( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
        super().__init__()
        UpperCamelCase :int = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        # Pair consecutive hidden sizes to size the remaining stages.
        UpperCamelCase :str = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ):
            self.stages.append(RegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ ) )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = True ) -> Union[str, Any]:
        UpperCamelCase :Optional[int] = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                # Record the hidden state before each stage ...
                UpperCamelCase :int = hidden_states + (hidden_state,)
            UpperCamelCase :str = stage_module(SCREAMING_SNAKE_CASE_ )
        if output_hidden_states:
            # ... and once more after the last stage.
            UpperCamelCase :Tuple = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase_ ( __lowerCamelCase ):
    """Pre-trained-model base class: config defaults plus weight initialization.

    NOTE(review): the base class name ``__lowerCamelCase`` is a mangled,
    unresolved identifier (presumably ``PreTrainedModel``), and the two methods
    below share the name ``UpperCAmelCase``, so only the last definition
    survives on the class. Confirm against the original source.
    """
    UpperCamelCase_ : str =RegNetConfig
    UpperCamelCase_ : Optional[Any] ="""regnet"""
    UpperCamelCase_ : Tuple ="""pixel_values"""
    UpperCamelCase_ : str =True
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
        # Kaiming init for convs; unit weight / zero bias for norm layers.
        # NOTE(review): ``module`` is unresolved as written (mangled parameter name).
        if isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ):
            nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
        elif isinstance(SCREAMING_SNAKE_CASE_ , (nn.BatchNormad, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]:
        # Gradient-checkpointing toggle (apparent intent) -- TODO confirm.
        if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase :str = value
__snake_case = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__snake_case = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.', __lowerCamelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class UpperCAmelCase_ ( __lowerCamelCase ):
    """Bare backbone model: embedder -> encoder -> adaptive average pooler.

    NOTE(review): identifiers are machine-mangled (``__lowerCamelCase`` base
    class and decorator argument are unresolved; every local is assigned to
    ``UpperCamelCase``, so reads such as ``encoder_outputs``/``return_dict``
    are unresolved as written). Confirm against the original source.
    """
    def __init__( self , SCREAMING_SNAKE_CASE_ ) -> Any:
        super().__init__(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :int = config
        UpperCamelCase :Dict = RegNetEmbeddings(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :int = RegNetEncoder(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None ) -> Union[str, Any]:
        # Fall back to config defaults when the flags are not given explicitly.
        UpperCamelCase :Optional[Any] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        UpperCamelCase :Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCamelCase :List[str] = self.embedder(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[int] = self.encoder(
            SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = encoder_outputs[0]
        UpperCamelCase :List[Any] = self.pooler(SCREAMING_SNAKE_CASE_ )
        if not return_dict:
            # Tuple output: (last_hidden_state, pooled_output, *hidden_states).
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', __lowerCamelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class UpperCAmelCase_ ( __lowerCamelCase ):
    """Backbone plus a linear image-classification head; supports regression,
    single-label and multi-label losses.

    NOTE(review): identifiers are machine-mangled (``__lowerCamelCase`` is
    unresolved; ``UpperCamelCase`` assignment targets are lost, so reads of
    ``outputs``/``logits``/``labels``/``loss`` are unresolved as written).
    Confirm against the original source.
    """
    def __init__( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
        super().__init__(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Tuple = config.num_labels
        UpperCamelCase :Dict = RegNetModel(SCREAMING_SNAKE_CASE_ )
        # classification head
        UpperCamelCase :Dict = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ) -> Dict:
        UpperCamelCase :Dict = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCamelCase :Dict = self.regnet(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :int = outputs.pooler_output if return_dict else outputs[1]
        UpperCamelCase :Optional[int] = self.classifier(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[Any] = None
        if labels is not None:
            # Infer the problem type once, following the standard transformers heuristic.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    UpperCamelCase :List[str] = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    UpperCamelCase :List[str] = '''single_label_classification'''
                else:
                    UpperCamelCase :str = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                UpperCamelCase :List[str] = MSELoss()
                if self.num_labels == 1:
                    UpperCamelCase :str = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    UpperCamelCase :Optional[int] = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            elif self.config.problem_type == "single_label_classification":
                UpperCamelCase :Union[str, Any] = CrossEntropyLoss()
                UpperCamelCase :List[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                UpperCamelCase :int = BCEWithLogitsLoss()
                UpperCamelCase :str = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        if not return_dict:
            UpperCamelCase :Any = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
| 658 |
from __future__ import annotations
class A_ :
    """A 2-D matrix of ints/floats with determinant, minors, cofactors,
    adjugate, inverse, and arithmetic operators.

    NOTE(review): identifiers are machine-mangled. Every local is assigned to
    the single name ``lowercase`` (so e.g. ``self.rows`` is never actually set
    in ``__init__``), names such as ``rows``/``cols``/``error``/``Matrix`` are
    unresolved, and most methods share the one name ``SCREAMING_SNAKE_CASE__``
    so only the last definition with that name survives on the class. The
    per-method comments below describe the apparent original intent; confirm
    against the original source before relying on them.
    """
    def __init__( self , snake_case ):
        # Validate that the argument is a rectangular list of int/float rows.
        lowercase = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(snake_case ) != 0:
            lowercase = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(snake_case ) != cols:
                    raise error
                for value in row:
                    if not isinstance(snake_case , (int, float) ):
                        raise error
            lowercase = rows
        else:
            lowercase = []
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: columns() — the transpose of the row list.
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: num_rows.
        return len(self.rows )
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: num_columns.
        return len(self.rows[0] )
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: order — the (rows, columns) shape tuple.
        return (self.num_rows, self.num_columns)
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: is_square.
        return self.order[0] == self.order[1]
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: identity matrix of the same order.
        lowercase = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: determinant via Laplace expansion along the first row.
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: is_invertable — non-zero determinant.
        return bool(self.determinant() )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # Apparent intent: minor of entry (row, column) — determinant of the
        # submatrix with that row and column removed.
        lowercase = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(snake_case ).determinant()
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # Apparent intent: signed cofactor of entry (row, column).
        if (row + column) % 2 == 0:
            return self.get_minor(snake_case , snake_case )
        return -1 * self.get_minor(snake_case , snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: matrix of minors.
        return Matrix(
            [
                [self.get_minor(snake_case , snake_case ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: matrix of cofactors (checkerboard-signed minors).
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: adjugate — transpose of the cofactor matrix.
        lowercase = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Apparent intent: inverse = adjugate * (1 / determinant).
        lowercase = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse' )
        return self.adjugate() * (1 / determinant)
    def __repr__( self ):
        return str(self.rows )
    def __str__( self ):
        # Pretty-print rows as "[...]" with one row per line.
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(snake_case ) for value in row] ) + '.]'
                    for row in self.rows
                ] )
            + "]"
        )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
        # Apparent intent: add_row(row, position=None) with type/length validation.
        lowercase = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(snake_case , snake_case ):
            raise type_error
        for value in row:
            if not isinstance(snake_case , (int, float) ):
                raise type_error
        if len(snake_case ) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(snake_case )
        else:
            lowercase = self.rows[0:position] + [row] + self.rows[position:]
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
        # Apparent intent: add_column(column, position=None) with validation.
        lowercase = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(snake_case , snake_case ):
            raise type_error
        for value in column:
            if not isinstance(snake_case , (int, float) ):
                raise type_error
        if len(snake_case ) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            lowercase = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            lowercase = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , snake_case ):
        if not isinstance(snake_case , snake_case ):
            return NotImplemented
        return self.rows == other.rows
    def __ne__( self , snake_case ):
        return not self == other
    def __neg__( self ):
        return self * -1
    def __add__( self , snake_case ):
        # Element-wise addition; orders must match.
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __sub__( self , snake_case ):
        # Element-wise subtraction; orders must match.
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __mul__( self , snake_case ):
        # Scalar multiplication or matrix product, depending on operand type.
        if isinstance(snake_case , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(snake_case , snake_case ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(snake_case , snake_case ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix' )
    def __pow__( self , snake_case ):
        # Integer powers only; negative powers require an invertible matrix.
        if not isinstance(snake_case , snake_case ):
            raise TypeError('A Matrix can only be raised to the power of an int' )
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power' )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power' )
        lowercase = self
        for _ in range(other - 1 ):
            result *= self
        return result
    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls , snake_case , snake_case ):
        # Apparent intent: dot_product(row, column) helper used by __mul__.
        return sum(row[i] * column[i] for i in range(len(snake_case ) ) )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 84 | 0 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class snake_case_ :
    """Histogram-based contrast stretcher built on OpenCV/matplotlib.

    NOTE(review): identifiers are machine-mangled — ``cva`` is presumably
    ``cv2``, every local is assigned to the single name ``__lowercase`` (so the
    intended attributes such as ``self.img`` are never actually set here), and
    all three public methods share the name ``UpperCAmelCase``, so only the
    last definition survives on the class. Confirm against the original source.
    """
    def __init__( self : Dict ) -> Optional[Any]:
        """Initialize accumulators (apparent intent: image buffers, grey-level
        count 256, remainder/CDF accumulators) -- TODO confirm."""
        __lowercase = ''
        __lowercase = ''
        __lowercase = []
        __lowercase = 0
        __lowercase = 256
        __lowercase = 0
        __lowercase = 0
        __lowercase = 0
        __lowercase = 0
    def UpperCAmelCase ( self : Any , __lowerCamelCase : int ) -> List[Any]:
        """Stretch the histogram of the image at the given path and write the
        result to output_data/output.jpg."""
        __lowercase = cva.imread(__lowerCamelCase , 0 )
        __lowercase = copy.deepcopy(self.img )
        __lowercase , __lowercase , __lowercase = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
        __lowercase = np.sum(__lowerCamelCase )
        for i in range(len(__lowerCamelCase ) ):
            # Cumulative distribution of grey levels, quantised back to [0, L-1].
            __lowercase = x[i] / self.k
            self.sk += prk
            __lowercase = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): ``last % last`` is always 0 for last != 0 —
                # this looks wrong; confirm the intended expression.
                __lowercase = int(last % last )
            __lowercase = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(__lowerCamelCase )
        # Remap every pixel through the stretched look-up list.
        __lowercase = int(np.ma.count(self.img ) / self.img[1].size )
        __lowercase = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                __lowercase = self.img[j][i]
                if num != self.last_list[num]:
                    __lowercase = self.last_list[num]
        cva.imwrite('output_data/output.jpg' , self.img )
    def UpperCAmelCase ( self : int ) -> str:
        """Plot the histogram of the current image."""
        plt.hist(self.img.ravel() , 256 , [0, 256] )
    def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
        """Show input and output images for five seconds, then close windows."""
        cva.imshow('Output-Image' , self.img )
        cva.imshow('Input-Image' , self.original_image )
        cva.waitKey(5_000 )
        cva.destroyAllWindows()
if __name__ == "__main__":
    # NOTE(review): mangled script entry point — both assignments target the
    # same name ``SCREAMING_SNAKE_CASE_``, and ``ConstantStretch``,
    # ``stretcher`` and ``file_path`` are not defined anywhere in this file
    # (the class above is named ``snake_case_``). This block cannot run as-is.
    SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
    SCREAMING_SNAKE_CASE_ : Optional[int] = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 375 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=8 ):
    """Scale (height, width) down to the latent grid and back, rounding up.

    NOTE(review): mangled beyond runnability — all three parameters share one
    name (a SyntaxError as written) and ``height``/``width``/``scale_factor``/
    ``new_height``/``new_width`` are never bound. Apparently this was a
    ``downscale_height_and_width(height, width, scale_factor=8)`` helper (it is
    called under that name below); confirm against the original source.
    """
    lowercase = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up when the dimension is not an exact multiple.
        new_height += 1
    lowercase = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class A_ ( __lowerCamelCase ):
    """Kandinsky-2.2-style decoder pipeline (unet + scheduler + movq VAE).

    NOTE(review): identifiers are machine-mangled — the base class
    ``__lowerCamelCase`` is unresolved (presumably ``DiffusionPipeline``),
    every local is assigned to the single name ``lowercase`` (so later reads of
    names such as ``latents``, ``image_embeds`` or ``device`` are unresolved as
    written), and several methods share the name ``SCREAMING_SNAKE_CASE__`` so
    only the last definition with that name survives. The comments below
    describe the apparent intent; confirm against the original source.
    """
    def __init__( self , snake_case , snake_case , snake_case , ):
        super().__init__()
        self.register_modules(
            unet=snake_case , scheduler=snake_case , movq=snake_case , )
        # VAE (movq) spatial scale factor: 2^(number of downsampling blocks).
        lowercase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
        # Apparent intent: prepare_latents — create fresh noise latents, or
        # validate/scale user-supplied ones by the scheduler's init sigma.
        if latents is None:
            lowercase = randn_tensor(snake_case , generator=snake_case , device=snake_case , dtype=snake_case )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            lowercase = latents.to(snake_case )
        lowercase = latents * scheduler.init_noise_sigma
        return latents
    def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 ):
        # Apparent intent: sequential CPU offload of all sub-models to save GPU
        # memory (requires the accelerate package).
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        lowercase = torch.device(F'''cuda:{gpu_id}''' )
        lowercase = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(snake_case , snake_case )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 ):
        # Apparent intent: model CPU offload with hooks (accelerate >= 0.17).
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        lowercase = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=snake_case )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        lowercase = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            lowercase , lowercase = cpu_offload_with_hook(snake_case , snake_case , prev_module_hook=snake_case )
        # We'll offload the last model manually.
        lowercase = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Resolve the device actually used for execution, accounting for
        # accelerate hooks that may have moved modules off self.device.
        if not hasattr(self.unet , '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(snake_case , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(snake_case )
    def __call__( self , snake_case , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 100 , snake_case = 4.0 , snake_case = 1 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , ):
        """Denoising loop: prepare image embeddings, run the UNet over the
        scheduler's timesteps with classifier-free guidance, then decode the
        latents via movq and post-process to the requested output type.
        """
        lowercase = self._execution_device
        lowercase = guidance_scale > 1.0
        if isinstance(snake_case , snake_case ):
            lowercase = torch.cat(snake_case , dim=0 )
        lowercase = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(snake_case , snake_case ):
            lowercase = torch.cat(snake_case , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate embeddings per requested image and stack negative+positive.
            lowercase = image_embeds.repeat_interleave(snake_case , dim=0 )
            lowercase = negative_image_embeds.repeat_interleave(snake_case , dim=0 )
            lowercase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case )
        self.scheduler.set_timesteps(snake_case , device=snake_case )
        lowercase = self.scheduler.timesteps
        lowercase = self.unet.config.in_channels
        lowercase , lowercase = downscale_height_and_width(snake_case , snake_case , self.movq_scale_factor )
        # create initial latent
        lowercase = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , snake_case , snake_case , snake_case , self.scheduler , )
        for i, t in enumerate(self.progress_bar(snake_case ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase = {'image_embeds': image_embeds}
            lowercase = self.unet(
                sample=snake_case , timestep=snake_case , encoder_hidden_states=snake_case , added_cond_kwargs=snake_case , return_dict=snake_case , )[0]
            if do_classifier_free_guidance:
                # Split predicted noise / variance, apply guidance to the noise part.
                lowercase , lowercase = noise_pred.split(latents.shape[1] , dim=1 )
                lowercase , lowercase = noise_pred.chunk(2 )
                lowercase , lowercase = variance_pred.chunk(2 )
                lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowercase , lowercase = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowercase = self.scheduler.step(
                snake_case , snake_case , snake_case , generator=snake_case , )[0]
        # post-processing
        lowercase = self.movq.decode(snake_case , force_not_quantize=snake_case )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            # Map from [-1, 1] to [0, 1] and move to numpy HWC layout.
            lowercase = image * 0.5 + 0.5
            lowercase = image.clamp(0 , 1 )
            lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowercase = self.numpy_to_pil(snake_case )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=snake_case )
| 84 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Module-level batch-size constants for the example script.
# NOTE(review): both bound to the obfuscated name `A__`, so the second
# assignment shadows the first; neither is referenced in the visible code.
# Presumably these were MAX_GPU_BATCH_SIZE / EVAL_BATCH_SIZE — TODO confirm.
A__ : int = 16
A__ : Optional[Any] = 32
def a(accelerator, batch_size=16):
    """Build train/eval ``DataLoader``s for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: the ``Accelerator`` used to gate dataset preprocessing to
            the main process and to decide padding strategy.
        batch_size: per-device batch size for both dataloaders.

    Returns:
        (train_dataloader, eval_dataloader)

    Note: the original had duplicate parameter names (a SyntaxError) and
    referenced undefined ``__SCREAMING_SNAKE_CASE`` placeholders; the intended
    names are restored here.
    """
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['''idx''', '''sentence1''', '''sentence2'''],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding='''longest''',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='''pt''',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only
# When the env var is set, replace the real dataloader builder with a mocked
# version so CI does not download GLUE/BERT assets.
# NOTE(review): the mock is bound to `A__`, not to the dataloader function's
# actual name, so the visible code never uses it — presumably an obfuscation
# artifact; verify against the original script.
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    A__ : Dict = mocked_dataloaders  # noqa: F811
def a(config, args):
    """Train/evaluate BERT on MRPC, retrying with smaller batch sizes on OOM.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
        args: parsed CLI namespace with ``cpu`` and ``mixed_precision``.

    The original body referenced undefined ``__SCREAMING_SNAKE_CASE``
    placeholders and bound every local to one name; the intended locals are
    restored here. ``get_dataloaders`` is resolved at call time, as before.
    """
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"""epoch {epoch}:""", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def a():
    """Parse CLI args and launch training.

    Restores the intended locals (the original bound everything to one
    obfuscated name and passed undefined placeholders to the trainer).
    ``training_function`` is resolved at call time, exactly as the original
    source wrote it.
    """
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''',
        type=str,
        default=None,
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''],
        help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''',
    )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
    # The original called undefined `main()`. All three functions in this file
    # were obfuscated to the same name `a`, so after module load `a` is the
    # last-defined one — the CLI entry point.
    a()
| 183 |
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if digit_amount > 0:
return round(number - int(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
return number - int(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # Demo calls. The original invoked undefined `decimal_isolate`; the only
    # function defined in this file carries the obfuscated name
    # `UpperCAmelCase_`, so call that.
    print(UpperCAmelCase_(1.53, 0))
    print(UpperCAmelCase_(35.345, 1))
    print(UpperCAmelCase_(35.345, 2))
    print(UpperCAmelCase_(35.345, 3))
    print(UpperCAmelCase_(-14.789, 3))
    print(UpperCAmelCase_(0, 2))
    print(UpperCAmelCase_(-14.123, 1))
    print(UpperCAmelCase_(-14.123, 2))
    print(UpperCAmelCase_(-14.123, 3))
| 84 | 0 |
'''simple docstring'''
import os
import sys
# Make the local `src` directory importable so the vendored package is found.
# The original assigned the path to `__lowerCAmelCase` but then appended the
# undefined name `SRC_DIR`; bind both so the append works and the obfuscated
# alias is preserved.
SRC_DIR = __lowerCAmelCase = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
# Names of runtime dependencies for the bootstrap script.
# NOTE(review): the list is not referenced anywhere in the visible code;
# presumably it was `DEPENDENCIES` used for import checks — TODO confirm.
__lowerCAmelCase = [
    'torch',
    'numpy',
    'tokenizers',
    'filelock',
    'requests',
    'tqdm',
    'regex',
    'sentencepiece',
    'sacremoses',
    'importlib_metadata',
    'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def _UpperCAmelCase(*args, **kwargs):
    # Thin wrapper around AutoConfig.from_pretrained. The original declared
    # `*__A, **__A` (duplicate parameter name — a SyntaxError) and forwarded
    # an undefined placeholder; proper varargs are restored.
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def _UpperCAmelCase(*args, **kwargs):
    # Thin wrapper around AutoTokenizer.from_pretrained (duplicate-parameter
    # SyntaxError and undefined placeholder fixed).
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def _UpperCAmelCase(*args, **kwargs):
    # Thin wrapper around AutoModel.from_pretrained (duplicate-parameter
    # SyntaxError and undefined placeholder fixed).
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def _UpperCAmelCase(*args, **kwargs):
    # Thin wrapper around AutoModelForCausalLM.from_pretrained
    # (duplicate-parameter SyntaxError and undefined placeholder fixed).
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def _UpperCAmelCase(*args, **kwargs):
    # Thin wrapper around AutoModelForMaskedLM.from_pretrained
    # (duplicate-parameter SyntaxError and undefined placeholder fixed).
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def _UpperCAmelCase(*args, **kwargs):
    # Thin wrapper around AutoModelForSequenceClassification.from_pretrained
    # (duplicate-parameter SyntaxError and undefined placeholder fixed).
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def _UpperCAmelCase(*args, **kwargs):
    # Thin wrapper around AutoModelForQuestionAnswering.from_pretrained
    # (duplicate-parameter SyntaxError and undefined placeholder fixed).
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 466 |
from __future__ import annotations
def UpperCAmelCase_(__SCREAMING_SNAKE_CASE):
    """Return True if the decimal representation of the argument is a palindrome.

    The original converted the argument to a string but then compared the
    undefined name ``n``; the converted string is now used consistently.
    """
    s = str(__SCREAMING_SNAKE_CASE)
    return s == s[::-1]
def UpperCAmelCase_(__SCREAMING_SNAKE_CASE = 100_0000):
    """Project Euler 36: sum numbers below the limit that are palindromic in
    both base 10 and base 2.

    The original called an undefined ``is_palindrome`` (the helper above was
    obfuscated to the same name as this function, so it is shadowed); a local
    helper is used instead so this block is self-contained.
    """

    def _is_palindrome(value):
        s = str(value)
        return s == s[::-1]

    total = 0
    for i in range(1, __SCREAMING_SNAKE_CASE):
        # binary palindromes are checked without the '0b' prefix
        if _is_palindrome(i) and _is_palindrome(bin(i).split('b')[1]):
            total += i
    return total
if __name__ == "__main__":
    # The original called undefined `solution`; both functions in this file
    # were obfuscated to `UpperCAmelCase_`, so after module load that name is
    # the solver (the second definition shadows the first).
    print(UpperCAmelCase_(int(str(input().strip()))))
| 84 | 0 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
# The original bound all three of these to the single obfuscated name
# `_UpperCamelCase`, leaving `logger`, `PREFIX` and `MODEL_MAPPING` — the
# names actually used by the functions below — undefined. Restore them, and
# keep the obfuscated alias pointing at its final original value.
logger = logging.get_logger(__name__)

# Base URL for downloading the OpenAI Jukebox checkpoints.
PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
# Per-model list of checkpoint files to download and convert.
MODEL_MAPPING = {
    'jukebox-1b-lyrics': [
        '5b/vqvae.pth.tar',
        '5b/prior_level_0.pth.tar',
        '5b/prior_level_1.pth.tar',
        '1b_lyrics/prior_level_2.pth.tar',
    ],
    'jukebox-5b-lyrics': [
        '5b/vqvae.pth.tar',
        '5b/prior_level_0.pth.tar',
        '5b/prior_level_1.pth.tar',
        '5b_lyrics/prior_level_2.pth.tar',
    ],
}
_UpperCamelCase = MODEL_MAPPING
def __UpperCAmelCase ( A : Optional[Any] ) -> Union[str, Any]:
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 1_0:
UpperCAmelCase_ : List[Any] = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 1_0:
UpperCAmelCase_ : Optional[Any] = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 1_0:
UpperCAmelCase_ : Dict = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 1_0:
UpperCAmelCase_ : Union[str, Any] = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
UpperCAmelCase_ : List[Any] = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
UpperCAmelCase_ : Dict = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
UpperCAmelCase_ : Tuple = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
UpperCAmelCase_ : List[str] = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def __UpperCAmelCase(state_dict, model_state_dict, key_prefix, mapping):
    """Rename every key of a raw Jukebox ``state_dict`` to transformers naming.

    Args:
        state_dict: raw checkpoint tensors keyed by original names.
        model_state_dict: the target model's state dict, used to validate that
            the renamed key exists and that shapes match.
        key_prefix: e.g. ``"vqvae"`` or ``"priors.N"`` — prepended when
            checking against ``model_state_dict``.
        mapping: out-param dict filled with new_key -> original_key.

    Returns:
        A new dict with renamed keys and the original tensor values.

    The original declared all four parameters as ``A`` (a SyntaxError) and
    collapsed every local into one name; names are reconstructed from the
    preserved regexes/f-strings. ``replace_key`` is resolved at call time, as
    the original wrote it (the helper above was obfuscated to another name).
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''')
    re_encoder_block_resnet = re.compile(
        r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_encoder_block_proj_out = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''')

    re_decoder_block_conv_out = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''')
    re_decoder_block_resnet = re.compile(
        r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_decoder_block_proj_in = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''')

    re_prior_cond_conv_out = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''')
    re_prior_cond_resnet = re.compile(
        r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_prior_cond_proj_in = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''')

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def __UpperCAmelCase ( A : str=None , A : Optional[Any]=None ) -> int:
    # Download the OpenAI Jukebox checkpoint files, rename their keys to the
    # transformers layout, load them into a JukeboxModel and save it.
    # NOTE(review): obfuscation damage — both parameters are named `A` (a
    # SyntaxError), and the body reads names that were never bound:
    # `model_name`, `pytorch_dump_folder_path` (presumably the two
    # parameters), `r` (the requests response), `model_to_convert`,
    # `weight_dict`, `mapping`, `old_dic`, `new_dic` — every assignment below
    # was collapsed onto `UpperCAmelCase_`. TODO restore the intended names.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
            # fetch each missing checkpoint shard into the dump folder
            UpperCAmelCase_ : Optional[int] = requests.get(F"{PREFIX}{file}" , allow_redirects=__SCREAMING_SNAKE_CASE )
            os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=__SCREAMING_SNAKE_CASE )
            open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , '''wb''' ).write(r.content )
    UpperCAmelCase_ : Any = MODEL_MAPPING[model_name.split('''/''' )[-1]]
    UpperCAmelCase_ : Tuple = JukeboxConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
    UpperCAmelCase_ : Optional[Any] = JukeboxModel(__SCREAMING_SNAKE_CASE )
    UpperCAmelCase_ : Any = []
    UpperCAmelCase_ : Any = {}
    for i, dict_name in enumerate(__SCREAMING_SNAKE_CASE ):
        # load one raw checkpoint and normalize its `.b`/`.w` suffixes before
        # the full key renaming pass
        UpperCAmelCase_ : str = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['''model''']
        UpperCAmelCase_ : str = {}
        for k in old_dic.keys():
            if k.endswith('''.b''' ):
                UpperCAmelCase_ : Any = old_dic[k]
            elif k.endswith('''.w''' ):
                UpperCAmelCase_ : Tuple = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                UpperCAmelCase_ : List[Any] = old_dic[k]
            else:
                UpperCAmelCase_ : Union[str, Any] = old_dic[k]
        # checkpoint 0 is the VQ-VAE; the rest are priors (in reverse order)
        UpperCAmelCase_ : Tuple = '''vqvae''' if i == 0 else F"priors.{3 - i}"
        UpperCAmelCase_ : Dict = fix_jukebox_keys(__SCREAMING_SNAKE_CASE , model.state_dict() , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        weight_dict.append(__SCREAMING_SNAKE_CASE )
    UpperCAmelCase_ : Tuple = weight_dict.pop(0 )
    model.vqvae.load_state_dict(__SCREAMING_SNAKE_CASE )
    for i in range(len(__SCREAMING_SNAKE_CASE ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
    # persist the old->new key mapping next to the converted weights
    with open(F"{pytorch_dump_folder_path}/mapping.json" , '''w''' ) as txtfile:
        json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(__SCREAMING_SNAKE_CASE )
    return weight_dict
if __name__ == "__main__":
    # The original assigned the parser to a dead obfuscated name and then used
    # the undefined names `parser`, `args` and `convert_openai_checkpoint`.
    # Bind the parser/args properly and call the conversion function under the
    # name it actually carries in this file (`__UpperCAmelCase`, the last
    # definition of that name being the converter).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='jukebox-5b-lyrics',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='jukebox-5b-lyrics-converted',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    args = parser.parse_args()
    __UpperCAmelCase(args.model_name, args.pytorch_dump_folder_path)
| 541 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# The original bound both the logger and the archive map to the same name
# `UpperCAmelCase`, so the logger was immediately clobbered and the `logger`
# name used inside the config class below was never defined. Bind the logger
# under its real name; the obfuscated alias keeps its final original value
# (the archive map).
logger = logging.get_logger(__name__)

UpperCAmelCase = {
    '''microsoft/conditional-detr-resnet-50''': (
        '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
    ),
}
class A_(PretrainedConfig):
    """Configuration class for the Conditional DETR model.

    Reconstructed from the obfuscated original, whose `__init__` declared
    every parameter as `snake_case` (duplicate names — a SyntaxError), bound
    every attribute to a dead local instead of `self`, and inherited from the
    undefined name `__lowerCamelCase`. Parameter names/defaults follow the
    attribute assignments the original body performed, in order.
    """

    model_type = '''conditional_detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self):
        # alias required by the `attribute_map` convention
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        # alias required by the `attribute_map` convention
        return self.d_model

    def to_dict(self):
        """Serialize this config (and any nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class A_(OnnxConfig):
    """ONNX export configuration for Conditional DETR.

    Reconstructed from the obfuscated original, which inherited from the
    undefined name `__lowerCamelCase` (intended: the `OnnxConfig` imported
    above) and collapsed all attribute/property names onto single obfuscated
    identifiers so only the last definition survived.
    """

    # minimum torch version supporting the operators this export needs
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self):
        # dynamic axes for the two model inputs
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ]
        )

    @property
    def atol_for_validation(self):
        # absolute tolerance when validating exported outputs
        return 1E-5

    @property
    def default_onnx_opset(self):
        return 12
| 84 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class _a :
    """Model tester that builds small ASTConfig/input fixtures for the tests below.

    NOTE(review): obfuscation damage — every `__init__` parameter is named
    `_UpperCAmelCase` (duplicate parameter names are a SyntaxError) while the
    body reads the real names (`parent`, `batch_size`, ...), and every local in
    the methods below is collapsed onto `UpperCamelCase_`. The intended
    parameter names match the attributes assigned in `__init__` — TODO restore.
    """

    def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=2 , _UpperCAmelCase=24 , _UpperCAmelCase=16 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=None , _UpperCAmelCase=2 , _UpperCAmelCase=2 , ) -> List[Any]:
        UpperCamelCase_ = parent
        UpperCamelCase_ = batch_size
        UpperCamelCase_ = patch_size
        UpperCamelCase_ = max_length
        UpperCamelCase_ = num_mel_bins
        UpperCamelCase_ = is_training
        UpperCamelCase_ = use_labels
        UpperCamelCase_ = hidden_size
        UpperCamelCase_ = num_hidden_layers
        UpperCamelCase_ = num_attention_heads
        UpperCamelCase_ = intermediate_size
        UpperCamelCase_ = hidden_act
        UpperCamelCase_ = hidden_dropout_prob
        UpperCamelCase_ = attention_probs_dropout_prob
        UpperCamelCase_ = type_sequence_label_size
        UpperCamelCase_ = initializer_range
        UpperCamelCase_ = scope
        UpperCamelCase_ = frequency_stride
        UpperCamelCase_ = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        UpperCamelCase_ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        UpperCamelCase_ = (self.max_length - self.patch_size) // self.time_stride + 1
        UpperCamelCase_ = frequency_out_dimension * time_out_dimension
        UpperCamelCase_ = num_patches + 2

    def _UpperCAmelCase ( self ) -> int:
        # build (config, input_values, labels) fixtures
        UpperCamelCase_ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        UpperCamelCase_ = None
        if self.use_labels:
            UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCamelCase_ = self.get_config()
        return config, input_values, labels

    def _UpperCAmelCase ( self ) -> Dict:
        # assemble an ASTConfig from the tester's hyper-parameters
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )

    def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
        # forward pass of the bare ASTModel and shape check of its hidden states
        UpperCamelCase_ = ASTModel(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        UpperCamelCase_ = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _UpperCAmelCase ( self ) -> List[Any]:
        # adapt fixtures into the (config, inputs_dict) form the common tests expect
        UpperCamelCase_ = self.prepare_config_and_inputs()
        (
            (
                UpperCamelCase_
            ) , (
                UpperCamelCase_
            ) , (
                UpperCamelCase_
            ) ,
        ) = config_and_inputs
        UpperCamelCase_ = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class _a ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
A_ = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = ASTModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> str:
pass
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['input_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = ASTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_audio():
    """Download the sample FLAC clip from the Hub and load it with torchaudio.

    Returns `(audio, sampling_rate)` as produced by `torchaudio.load`.
    The original passed an undefined name to `torchaudio.load`; the downloaded
    path is now threaded through correctly.
    """
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset')
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the finetuned AudioSet checkpoint end to end.

    Renamed from the duplicate obfuscated class name so both test classes in this
    module can coexist, and restored the local variable names the dump collapsed.
    """

    @cached_property
    def default_feature_extractor(self):
        # Only build the extractor when torchaudio is installed; tests skip otherwise.
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='pt' ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits: AudioSet has 527 classes.
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# --- boundary between concatenated source files (extraction artifact removed) ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy import structure: the tokenizer is only registered when sentencepiece is
# installed. The obfuscated original never defined `_import_structure`, so the
# `_LazyModule(...)` call at the bottom raised NameError.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: expose nothing instead of failing at import time.
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    # Replace this module with a lazy proxy so the submodule is imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- boundary between concatenated source files (extraction artifact removed) ---
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger; `main()` below configures and uses it. The obfuscated
# original bound the logger to a throwaway name, leaving `logger` undefined.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav, max_length, sample_rate: int = 16000):
    """Randomly sample a chunk of `max_length` seconds from the input audio.

    Args:
        wav: 1-D audio sequence (anything supporting ``len`` and slicing).
        max_length: maximum clip length in seconds.
        sample_rate: samples per second used to convert seconds to samples.

    Returns the input unchanged when it is already short enough, otherwise a
    random contiguous slice of ``sample_rate * max_length`` samples.
    (The original declared three parameters with the same name — a SyntaxError.)
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Field names restored from their uses as `data_args.*` in `main()`; the
    obfuscated original gave every field the same name and no annotation, so
    `dataclasses.field` never registered anything.
    """

    dataset_name: Optional[str] = field(default=None, metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_file: Optional[str] = field(
        default=None, metadata={'help': 'A file containing the training audio paths and labels.'}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={'help': 'A file containing the validation audio paths and labels.'}
    )
    train_split_name: str = field(
        default='train',
        metadata={
            'help': "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default='validation',
        metadata={
            'help': (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default='audio',
        metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default='label',
        metadata={'help': "The name of the dataset column containing the labels. Defaults to 'label'"},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config we are going to fine-tune from.

    Field names restored from their uses as `model_args.*` in `main()`; boolean
    defaults restored from the upstream example script.
    """

    model_name_or_path: str = field(
        default='facebook/wav2vec2-base',
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'},
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'}
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={'help': 'Name or path of preprocessor config.'}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={'help': 'Whether to freeze the feature encoder layers of the model.'}
    )
    attention_mask: bool = field(
        default=True, metadata={'help': 'Whether to generate an attention mask in the feature extractor.'}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},
    )

    def __post_init__(self):
        # `--freeze_feature_extractor` is the deprecated spelling of
        # `--freeze_feature_encoder`; warn on redundant use, reject contradictions.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    """Fine-tune an audio-classification model on a Hub dataset.

    Restored from an obfuscated dump in which every assignment target was the
    single name `A` and every call argument `__SCREAMING_SNAKE_CASE`; variable
    names are recovered from the attribute reads that survived (e.g.
    `training_args.*`, `data_args.*`, `model_args.*`).
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
    logger.info(f'Training/evaluation parameters {training_args}' )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f'{", ".join(raw_datasets["train"].column_names )}.'
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f'{", ".join(raw_datasets["train"].column_names )}.'
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Randomly subsample each training clip, then featurize the batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Featurize full validation clips (no subsampling)."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
# --- boundary between concatenated source files (extraction artifact removed) ---
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of `graph`, or report a cycle.

    Args:
        graph: adjacency list as a dict mapping vertex -> list of successors,
               with vertices numbered 0..len(graph)-1.

    Prints "Cycle exists" when not all vertices can be ordered, otherwise prints
    the order. (The obfuscated original enqueued the graph object itself instead
    of vertex indices, so it could never run.)
    """
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    # Count incoming edges per vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with all sources (indegree 0).
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)

        # Removing `vertex` may expose new sources.
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
# --- boundary between concatenated source files (extraction artifact removed) ---
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of a non-negative integer."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the `max_n`-th convergent
    of the continued fraction for e.

    The obfuscated original defined both functions under one name and called an
    undefined `sum_digits`; names and the recurrence are restored here.
    """
    # h_0 = 1 seeds the recurrence; h_1 = 2 is the first convergent numerator.
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        # Continued-fraction terms of e after the leading 2 are
        # 1, 2, 1, 1, 4, 1, 1, 6, ...: every i divisible by 3 contributes 2*i/3.
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(F'''{solution() = }''')
# --- boundary between concatenated source files (extraction artifact removed) ---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Lazy import structure; backend-specific entries are appended below only when
# their backend is available. The obfuscated original rebound one throwaway name
# instead of building this dict, so `_LazyModule` received an undefined name.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy import structure; backend-specific entries are appended below only when
# their backend is available. The obfuscated original never defined
# `_import_structure`, making the `_LazyModule` call below a NameError.
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """
    Output of the Flax ControlNet: one residual per down-block resolution plus
    the mid-block residual. Field names restored from the keyword arguments
    used when this output is constructed below (the obfuscated original gave
    both fields the same name, so only one survived).
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    """
    Embed the conditioning image into the ControlNet feature space: a conv-in,
    a stack of strided conv pairs that downsample to the latent resolution, and
    a zero-initialized conv-out.

    Restored from an obfuscated dump that dropped the `self.` on every setup
    assignment (so `__call__` referenced attributes that were never created)
    and collapsed all dataclass fields into one name.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            # Strided conv halves the spatial resolution between channel stages.
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        # Zero init so the ControlNet starts as a no-op on the base model.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""
    Flax ControlNet: mirrors the UNet encoder, adds an embedded conditioning
    image to the input, and returns per-resolution residuals plus a mid-block
    residual for the base UNet to consume.

    NOTE(review): config field names, `self.*` attribute names, and the
    `init_weights`/`setup`/`__call__` method names were reconstructed from the
    surviving call structure of an obfuscated dump (which collapsed them all
    and used nonexistent `jnp.floataa`/`jnp.intaa` dtypes) — confirm against
    the upstream implementation.
    """

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng):
        """Initialize parameters from dummy inputs and return the params tree."""
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        # Conditioning image lives at pixel resolution (8x the latent size).
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        # All controlnet projection convs are zero-initialized (no-op at start).
        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding='VALID',
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding='VALID',
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding='VALID',
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding='VALID',
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale=1.0,
        return_dict=True,
        train=False,
    ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process (NCHW -> NHWC for flax convs)
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
# --- boundary between concatenated source files (extraction artifact removed) ---
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case :
    """Builds tiny UMT5 configurations and random inputs for the tests below.

    NOTE(review): this file looks machine-mangled — the class is instantiated
    as ``UMTaModelTester`` in the test class further down, every method is
    named ``_lowercase`` (so later defs shadow earlier ones), and distinct
    locals/attributes have been collapsed into the single name
    ``SCREAMING_SNAKE_CASE_``.  The assignments in ``__init__`` bind plain
    locals (no ``self.``), so the attributes read later
    (``self.batch_size``, ``self.vocab_size``, ...) are never actually set.
    Restore the original identifiers before relying on this harness.
    """

    def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=9 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0_02 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ):
        """Record the tiny-model hyper-parameters (sizes, dropout, token ids).

        NOTE(review): the right-hand-side names (``parent``, ``batch_size``,
        ...) are the *original* parameter names; the mangled signature no
        longer binds them, so this body raises NameError as written.
        """
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = encoder_seq_length
        SCREAMING_SNAKE_CASE_ = decoder_seq_length
        # For common tests
        SCREAMING_SNAKE_CASE_ = self.decoder_seq_length
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_attention_mask
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = hidden_size
        SCREAMING_SNAKE_CASE_ = num_hidden_layers
        SCREAMING_SNAKE_CASE_ = num_attention_heads
        SCREAMING_SNAKE_CASE_ = d_ff
        SCREAMING_SNAKE_CASE_ = relative_attention_num_buckets
        SCREAMING_SNAKE_CASE_ = dropout_rate
        SCREAMING_SNAKE_CASE_ = initializer_factor
        SCREAMING_SNAKE_CASE_ = eos_token_id
        SCREAMING_SNAKE_CASE_ = pad_token_id
        SCREAMING_SNAKE_CASE_ = decoder_start_token_id
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = decoder_layers

    def _lowercase (self ):
        """Load the reference UMT5 config from the Hub checkpoint."""
        return TaConfig.from_pretrained('''google/umt5-base''' )

    def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ):
        """Fill in default attention/head masks and pack the model kwargs.

        Missing masks default to all-ones head masks and a padding-aware
        attention mask (``ne(pad_token_id)`` -> 1 for real tokens).
        """
        if attention_mask is None:
            SCREAMING_SNAKE_CASE_ = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            SCREAMING_SNAKE_CASE_ = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            SCREAMING_SNAKE_CASE_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE_ )
        if decoder_head_mask is None:
            SCREAMING_SNAKE_CASE_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE_ )
        if cross_attn_head_mask is None:
            SCREAMING_SNAKE_CASE_ = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE_ )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def _lowercase (self ):
        """Create random encoder/decoder ids plus a config; return (config, inputs)."""
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        SCREAMING_SNAKE_CASE_ = input_ids.clamp(self.pad_token_id + 1 )
        SCREAMING_SNAKE_CASE_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
        SCREAMING_SNAKE_CASE_ = self.get_config()
        SCREAMING_SNAKE_CASE_ = config.num_attention_heads
        SCREAMING_SNAKE_CASE_ = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        return config, input_dict

    def _lowercase (self ):
        """Thin wrapper over prepare_config_and_inputs for the common test mixin."""
        SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        return config, inputs_dict

    def _lowercase (self ):
        """Build a tiny TaConfig with a hard-coded 166-token vocabulary.

        NOTE(review): differs from the method below only in ``vocab_size``
        (166 here vs ``self.vocab_size`` below) — presumably the pipeline
        config vs the model-test config; confirm against the call sites.
        """
        return TaConfig(
            vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    def _lowercase (self ):
        """Build the tiny TaConfig used by the model tests (vocab from self)."""
        return TaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
        """Run an encoder-decoder forward pass and check output shapes and cache layout."""
        SCREAMING_SNAKE_CASE_ = UMTaModel(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(
            input_ids=SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , )
        SCREAMING_SNAKE_CASE_ = model(input_ids=SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = result.last_hidden_state
        SCREAMING_SNAKE_CASE_ = result.past_key_values
        SCREAMING_SNAKE_CASE_ = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )

    def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
        """Check that cached (past_key_values) decoding matches the full forward
        pass on the newly appended token, within atol=1e-3."""
        SCREAMING_SNAKE_CASE_ = UMTaModel(config=SCREAMING_SNAKE_CASE_ ).get_decoder().to(SCREAMING_SNAKE_CASE_ ).eval()
        # first forward pass
        SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
        self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) )
        self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) + 1 )
        SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1 )
        SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ )['''last_hidden_state''']
        SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )['''last_hidden_state''']
        # select random slice
        SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -1, random_slice_idx].detach()
        SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )

    def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
        """Smoke-test a half-precision forward pass: output must contain no NaNs."""
        SCREAMING_SNAKE_CASE_ = UMTaModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).half().eval()
        SCREAMING_SNAKE_CASE_ = model(**SCREAMING_SNAKE_CASE_ )['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE_ ).any().item() )
@require_torch
class snake_case ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """Common-suite tests for the UMT5 model family.

    NOTE(review): the three mangled base classes ``__lowerCamelCase`` would
    raise at class-creation time; upstream this is (ModelTesterMixin,
    GenerationTesterMixin, PipelineTesterMixin).  Likewise all class
    attributes have been collapsed to ``UpperCAmelCase__`` — each assignment
    below originally targeted a distinct flag (all_model_classes,
    all_generative_model_classes, pipeline_model_mapping, test flags, ...).
    Restore the names before running.
    """

    # Originally: all_model_classes
    UpperCAmelCase__ = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    # Originally: all_generative_model_classes
    UpperCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    # Originally: pipeline_model_mapping
    UpperCAmelCase__ = (
        {
            """conversational""": UMTaForConditionalGeneration,
            """feature-extraction""": UMTaModel,
            """summarization""": UMTaForConditionalGeneration,
            """text2text-generation""": UMTaForConditionalGeneration,
            """translation""": UMTaForConditionalGeneration,
            """question-answering""": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): distinct boolean test flags, names lost to mangling.
    UpperCAmelCase__ = True
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    UpperCAmelCase__ = True
    UpperCAmelCase__ = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    UpperCAmelCase__ = [0.8, 0.9]

    def _lowercase (self ):
        """setUp: attach the tiny-model tester defined above."""
        SCREAMING_SNAKE_CASE_ = UMTaModelTester(self )

    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def _lowercase (self ):
        """Export the tiny model to ONNX (skipped, see decorator)."""
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE_ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                SCREAMING_SNAKE_CASE_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'{tmpdirname}/t5_test.onnx' , export_params=SCREAMING_SNAKE_CASE_ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )

    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def _lowercase (self ):
        """Run the fp16 NaN smoke test on accelerator devices only."""
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE_ )

    def _lowercase (self ):
        """Check head masking: fully-masked heads must produce zero attention weights."""
        SCREAMING_SNAKE_CASE_ = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ = config_and_inputs[0]
        SCREAMING_SNAKE_CASE_ = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval()
        model.to(SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_ ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_ ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_ ),
        }
        for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE_ , head_masking.items() ):
            SCREAMING_SNAKE_CASE_ = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                SCREAMING_SNAKE_CASE_ = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_ )
            SCREAMING_SNAKE_CASE_ = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE_ , return_dict_in_generate=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            SCREAMING_SNAKE_CASE_ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )

    @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def _lowercase (self ):
        """Disabled common test (see skip reason)."""
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
    """Slow integration test against the real google/umt5-small checkpoint.

    NOTE(review): ``torch.testing.assert_allclose`` (used below) is
    deprecated in favour of ``torch.testing.assert_close``; since the test
    is currently ``@unittest.skip``-ped this is harmless, but it should be
    migrated before re-enabling.
    """

    @slow
    @unittest.skip(
        '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def _lowercase (self ):
        """Round-trip sentinel-token prompts through tokenizer + generate and
        compare against golden token ids and decoded strings."""
        SCREAMING_SNAKE_CASE_ = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE_ , legacy=SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = [
            '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
            '''No se como puedo <extra_id_0>.''',
            '''This is the reason why we <extra_id_0> them.''',
            '''The <extra_id_0> walks in <extra_id_1>, seats''',
            '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
        ]
        SCREAMING_SNAKE_CASE_ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE_ ).input_ids
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor(
            [
                [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = model.generate(input_ids.to(SCREAMING_SNAKE_CASE_ ) )
        SCREAMING_SNAKE_CASE_ = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
            '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
        ]
        SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): mangled module constant; value suggests an env-var-style flag
# (e.g. CUDA_LAUNCH_BLOCKING / use_cuda marker) — confirm original name.
UpperCAmelCase = '''true'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=16 ):
    """Build a seeded RegressionModel + DataLoader and prepare them with Accelerate.

    NOTE(review): identifiers are machine-mangled.  Call sites refer to this
    as ``get_basic_setup(accelerator, num_samples, batch_size)``; the body
    still references the original names (``accelerator``, ``model``, ...),
    which the mangled parameters no longer bind.
    """
    set_seed(42 )
    lowercase = RegressionModel()
    # keep an un-wrapped deep copy so "no ddp" and "ddp" paths can be compared
    lowercase = deepcopy(__SCREAMING_SNAKE_CASE )
    lowercase = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
    lowercase = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
    model.to(accelerator.device )
    lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    return model, ddp_model, dataloader
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
    """Tokenize GLUE/MRPC validation split and return a padded DataLoader.

    NOTE(review): mangled — called elsewhere as
    ``get_dataloader(accelerator, use_longest)``.  The second flag picks
    dynamic ('longest') vs fixed (max_length=128) padding in ``collate_fn``.
    """
    lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
    lowercase = load_dataset('glue' , 'mrpc' , split='validation' )

    def tokenize_function(__SCREAMING_SNAKE_CASE ):
        # pair-sentence tokenization; truncation/max_length flags are mangled
        lowercase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
        return outputs

    # main_process_first: only rank 0 runs the map, others reuse the cache
    with accelerator.main_process_first():
        lowercase = dataset.map(
            __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    lowercase = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(__SCREAMING_SNAKE_CASE ):
        if use_longest:
            return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )
        return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' )

    return DataLoader(__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=16 )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """Build the MRPC model/dataloader pair in both plain and DDP-prepared form.

    Returns a dict with keys "ddp" (prepared model/loader on cuda:0) and "no"
    (unwrapped model/loader on accelerator.device), plus the Accelerator.
    NOTE(review): mangled — called as ``get_mrpc_setup(dispatch_batches,
    split_batches)`` elsewhere.
    """
    lowercase = Accelerator(dispatch_batches=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE )
    lowercase = get_dataloader(__SCREAMING_SNAKE_CASE , not dispatch_batches )
    lowercase = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased' , return_dict=__SCREAMING_SNAKE_CASE )
    lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """Run inference over a dataloader and gather (logits, targets) across
    processes with ``gather_for_metrics`` (drops duplicate last-batch padding).

    NOTE(review): mangled — original signature is
    ``generate_predictions(model, dataloader, accelerator)``.
    """
    lowercase = []
    for batch in dataloader:
        lowercase , lowercase = batch.values()
        with torch.no_grad():
            lowercase = model(__SCREAMING_SNAKE_CASE )
        # gather_for_metrics trims the duplicated samples added by DDP sharding
        lowercase , lowercase = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    lowercase , lowercase = [], []
    for logit, targ in logits_and_targets:
        logits.append(__SCREAMING_SNAKE_CASE )
        targs.append(__SCREAMING_SNAKE_CASE )
    lowercase , lowercase = torch.cat(__SCREAMING_SNAKE_CASE ), torch.cat(__SCREAMING_SNAKE_CASE )
    return logits, targs
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=16 ):
    """Assert that gathered predictions contain exactly ``num_samples`` rows.

    NOTE(review): mangled — called as ``test_torch_metrics(accelerator,
    num_samples, ...)`` from ``main`` below.
    """
    lowercase , lowercase , lowercase = get_basic_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    lowercase , lowercase = generate_predictions(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    assert (
        len(__SCREAMING_SNAKE_CASE ) == num_samples
    ), F'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(__SCREAMING_SNAKE_CASE )}'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False ):
    """Check that single-process and distributed MRPC metrics agree.

    Computes accuracy/F1 once on the unwrapped model ("no") and once on the
    DDP-prepared model ("ddp" + gather_for_metrics), then asserts both are
    numerically close per metric key.
    NOTE(review): mangled — original signature
    ``test_mrpc(dispatch_batches=False, split_batches=False)``.
    """
    lowercase = evaluate.load('glue' , 'mrpc' )
    lowercase , lowercase = get_mrpc_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    # First do baseline
    lowercase , lowercase , lowercase = setup['no']
    model.to(__SCREAMING_SNAKE_CASE )
    model.eval()
    for batch in dataloader:
        batch.to(__SCREAMING_SNAKE_CASE )
        with torch.inference_mode():
            lowercase = model(**__SCREAMING_SNAKE_CASE )
        lowercase = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=batch['labels'] )
    lowercase = metric.compute()
    # Then do distributed
    lowercase , lowercase , lowercase = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            lowercase = model(**__SCREAMING_SNAKE_CASE )
        lowercase = outputs.logits.argmax(dim=-1 )
        lowercase = batch['labels']
        # de-duplicate across ranks before feeding the metric
        lowercase , lowercase = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )
    lowercase = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def UpperCAmelCase_ ( ):
    """Entry point: sweep split_batches × dispatch_batches and run the
    gather_for_metrics checks, quieting logs on non-main processes.

    NOTE(review): mangled — this is ``main``; the ``Accelerator(...)`` calls
    pass ``__SCREAMING_SNAKE_CASE`` where the loop variables
    ``split_batches`` / ``dispatch_batches`` were originally used.
    """
    lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**' )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
                test_mrpc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                # reset global process state between configurations
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**' )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
            test_torch_metrics(__SCREAMING_SNAKE_CASE , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**' )
    lowercase = Accelerator()
    # 512 is divisible by typical world sizes: no padding should be added/stripped
    test_torch_metrics(__SCREAMING_SNAKE_CASE , 512 )
    accelerator.state._reset_state()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """TPU spawn entry point: xla_spawn passes a process index we ignore."""
    # For xla_spawn (TPUs)
    main()
# Script entry point: run the full metric-gathering test suite.
if __name__ == "__main__":
    main()
| 84 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Force deterministic kernels so the pixel-level assertions below are reproducible.
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Fast (tiny-model) tests for StableDiffusionUpscalePipeline.

    NOTE(review): identifiers are machine-mangled — every method is named
    ``lowercase__`` (later defs shadow earlier ones) and distinct locals are
    collapsed into ``lowerCAmelCase``; the first method calls
    ``super().tearDown()`` and was originally ``tearDown``, while the
    ``@property`` builders were ``dummy_image`` / ``dummy_cond_unet_upscale``
    / ``dummy_vae`` / ``dummy_text_encoder`` (they are referenced by those
    names inside the tests).  Restore the names before running.
    """

    def lowercase__ ( self ):
        """tearDown: free CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowercase__ ( self ):
        """A seeded random 1x3x32x32 float image tensor on the test device."""
        lowerCAmelCase : int = 1
        lowerCAmelCase : Tuple = 3
        lowerCAmelCase : List[str] = (32, 32)
        # fixed RNG seed => identical tensor across runs
        lowerCAmelCase : List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ )
        return image

    @property
    def lowercase__ ( self ):
        """A tiny conditional UNet configured for the upscale pipeline (7 in-channels)."""
        torch.manual_seed(0 )
        lowerCAmelCase : Tuple = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=snake_case__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model

    @property
    def lowercase__ ( self ):
        """A tiny AutoencoderKL VAE matching the UNet above."""
        torch.manual_seed(0 )
        lowerCAmelCase : List[Any] = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    @property
    def lowercase__ ( self ):
        """A tiny CLIP text encoder for prompt conditioning."""
        torch.manual_seed(0 )
        lowerCAmelCase : int = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
        return CLIPTextModel(snake_case__ )

    def lowercase__ ( self ):
        """End-to-end CPU run: 4x upscale shape check + golden pixel slice,
        and dict vs tuple return paths must match."""
        lowerCAmelCase : Dict = "cpu"  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase : Dict = self.dummy_cond_unet_upscale
        lowerCAmelCase : str = DDPMScheduler()
        lowerCAmelCase : List[str] = DDIMScheduler(prediction_type="v_prediction" )
        lowerCAmelCase : str = self.dummy_vae
        lowerCAmelCase : str = self.dummy_text_encoder
        lowerCAmelCase : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowerCAmelCase : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase : int = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        lowerCAmelCase : Any = StableDiffusionUpscalePipeline(
            unet=snake_case__ , low_res_scheduler=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , max_noise_level=350 , )
        lowerCAmelCase : Union[str, Any] = sd_pipe.to(snake_case__ )
        sd_pipe.set_progress_bar_config(disable=snake_case__ )
        lowerCAmelCase : Optional[Any] = "A painting of a squirrel eating a burger"
        lowerCAmelCase : Optional[Any] = torch.Generator(device=snake_case__ ).manual_seed(0 )
        lowerCAmelCase : Dict = sd_pipe(
            [prompt] , image=snake_case__ , generator=snake_case__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        lowerCAmelCase : str = output.images
        lowerCAmelCase : Union[str, Any] = torch.Generator(device=snake_case__ ).manual_seed(0 )
        lowerCAmelCase : Dict = sd_pipe(
            [prompt] , image=snake_case__ , generator=snake_case__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=snake_case__ , )[0]
        lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        lowerCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
        # the pipeline upscales 4x
        lowerCAmelCase : Optional[int] = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        lowerCAmelCase : List[str] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase__ ( self ):
        """Batched prompts and num_images_per_prompt must both yield 2 images."""
        lowerCAmelCase : Any = "cpu"  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase : int = self.dummy_cond_unet_upscale
        lowerCAmelCase : Optional[int] = DDPMScheduler()
        lowerCAmelCase : Union[str, Any] = DDIMScheduler(prediction_type="v_prediction" )
        lowerCAmelCase : Any = self.dummy_vae
        lowerCAmelCase : Dict = self.dummy_text_encoder
        lowerCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowerCAmelCase : Any = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase : Optional[int] = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        lowerCAmelCase : int = StableDiffusionUpscalePipeline(
            unet=snake_case__ , low_res_scheduler=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , max_noise_level=350 , )
        lowerCAmelCase : Any = sd_pipe.to(snake_case__ )
        sd_pipe.set_progress_bar_config(disable=snake_case__ )
        lowerCAmelCase : Optional[Any] = "A painting of a squirrel eating a burger"
        lowerCAmelCase : Dict = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        lowerCAmelCase : List[str] = output.images
        assert image.shape[0] == 2
        lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(0 )
        lowerCAmelCase : int = sd_pipe(
            [prompt] , image=snake_case__ , generator=snake_case__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        lowerCAmelCase : Tuple = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def lowercase__ ( self ):
        """fp16 smoke test on GPU: UNet/text-encoder in half precision, VAE kept fp32."""
        lowerCAmelCase : Any = self.dummy_cond_unet_upscale
        lowerCAmelCase : str = DDPMScheduler()
        lowerCAmelCase : Optional[Any] = DDIMScheduler(prediction_type="v_prediction" )
        lowerCAmelCase : int = self.dummy_vae
        lowerCAmelCase : Any = self.dummy_text_encoder
        lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowerCAmelCase : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase : int = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        lowerCAmelCase : List[str] = unet.half()
        lowerCAmelCase : Any = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        lowerCAmelCase : Tuple = StableDiffusionUpscalePipeline(
            unet=snake_case__ , low_res_scheduler=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , max_noise_level=350 , )
        lowerCAmelCase : List[str] = sd_pipe.to(snake_case__ )
        sd_pipe.set_progress_bar_config(disable=snake_case__ )
        lowerCAmelCase : Union[str, Any] = "A painting of a squirrel eating a burger"
        lowerCAmelCase : int = torch.manual_seed(0 )
        lowerCAmelCase : Dict = sd_pipe(
            [prompt] , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ).images
        lowerCAmelCase : Tuple = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow GPU integration tests against the real x4-upscaler checkpoint.

    NOTE(review): same mangling as above — all methods named ``lowercase__``;
    the first was originally ``tearDown``.
    """

    def lowercase__ ( self ):
        """tearDown: free CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase__ ( self ):
        """fp32 checkpoint: upscaled cat must match the golden numpy image (atol 1e-3)."""
        lowerCAmelCase : Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        lowerCAmelCase : Any = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        lowerCAmelCase : Optional[int] = "stabilityai/stable-diffusion-x4-upscaler"
        lowerCAmelCase : int = StableDiffusionUpscalePipeline.from_pretrained(snake_case__ )
        pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        pipe.enable_attention_slicing()
        lowerCAmelCase : Tuple = "a cat sitting on a park bench"
        lowerCAmelCase : List[Any] = torch.manual_seed(0 )
        lowerCAmelCase : Any = pipe(
            prompt=snake_case__ , image=snake_case__ , generator=snake_case__ , output_type="np" , )
        lowerCAmelCase : Optional[int] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-3

    def lowercase__ ( self ):
        """fp16 checkpoint: looser tolerance (5e-1) against the fp16 golden image."""
        lowerCAmelCase : int = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        lowerCAmelCase : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        lowerCAmelCase : List[str] = "stabilityai/stable-diffusion-x4-upscaler"
        lowerCAmelCase : Dict = StableDiffusionUpscalePipeline.from_pretrained(
            snake_case__ , torch_dtype=torch.floataa , )
        pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        pipe.enable_attention_slicing()
        lowerCAmelCase : Optional[Any] = "a cat sitting on a park bench"
        lowerCAmelCase : Any = torch.manual_seed(0 )
        lowerCAmelCase : str = pipe(
            prompt=snake_case__ , image=snake_case__ , generator=snake_case__ , output_type="np" , )
        lowerCAmelCase : List[str] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def lowercase__ ( self ):
        """Memory cap test: attention slicing + sequential CPU offload must keep
        peak CUDA allocation under ~2.9 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCAmelCase : Tuple = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        lowerCAmelCase : Tuple = "stabilityai/stable-diffusion-x4-upscaler"
        lowerCAmelCase : Dict = StableDiffusionUpscalePipeline.from_pretrained(
            snake_case__ , torch_dtype=torch.floataa , )
        pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        lowerCAmelCase : List[Any] = "a cat sitting on a park bench"
        lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
        lowerCAmelCase : Any = pipe(
            prompt=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=5 , output_type="np" , )
        lowerCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 645 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Bundles an OwlViT image processor and a CLIP tokenizer into one processor.

    NOTE(review): identifiers restored from the canonical upstream
    implementation — the original bound every class attribute and every method
    to the same mangled name (so later definitions shadowed earlier ones), its
    base class name was undefined, and `__call__` referenced names
    (`encodings`, `input_ids`, `image_features`, ...) that were never bound.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backward compatibility: accept the deprecated kwarg but warn.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize `text` queries and/or preprocess `images` / `query_images`.

        Returns a BatchEncoding whose keys depend on which inputs were given
        (`input_ids`/`attention_mask`, `query_pixel_values`, `pixel_values`).
        Raises ValueError if all three inputs are None or the requested tensor
        framework is unavailable; TypeError for malformed `text`.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Concatenate the per-sample encodings along the batch axis in the
            # requested framework.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 84 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both assignments below bind the same mangled name, so the
# logger is immediately overwritten by the archive map; nothing in this chunk
# reads either binding, but the names should be distinct (logger vs. config
# archive map) — confirm against the canonical file before relying on them.
__snake_case = logging.get_logger(__name__)

# Map from canonical checkpoint name to its hosted config.json URL.
__snake_case = {
    """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class Data2VecTextConfig(PretrainedConfig):
    """Configuration for the data2vec text model.

    NOTE(review): restored from the canonical upstream class — the original
    `__init__` declared every parameter with the same mangled name (a Python
    SyntaxError: duplicate argument), so none of the stored attributes could
    actually be wired to their parameter, and `classifier_dropout` was unbound.
    The class name is also restored so it no longer collides with the ONNX
    config class defined right after it.
    """

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    """ONNX export configuration for data2vec text models.

    NOTE(review): renamed — the original reused the name of the config class
    above (clobbering it), and the property must be called `inputs` to
    override the OnnxConfig hook; its return annotation is also corrected.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch
        # and sequence; every other task uses (batch, sequence).
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 658 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): constant names restored — the original bound all four values
# to one mangled name, so the tokenizer class below referenced
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES without them being defined.

# Names of the serialized tokenizer files.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Hosted locations of those files per pretrained checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input length per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer backed by a byte-level BPE tokenizer.

    NOTE(review): restored from the canonical upstream class — the original
    `__init__` and helper methods declared duplicate parameter names (a
    SyntaxError), both helpers shared one mangled method name (the second
    clobbered the first), and the base class name was undefined.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BOS/EOS around one sequence (or an EOS-joined pair)."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """BlenderbotSmall does not use token types: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 84 | 0 |
import re
from filelock import FileLock
# Detect whether nltk is installed; the sentence splitter below requires it.
# NOTE(review): the flag was assigned to a mangled name while the guard below
# tested `NLTK_AVAILABLE`, which was never defined — restored.
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Serialize the download across concurrent workers sharing a filesystem.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def SCREAMING_SNAKE_CASE(snake_case: str) -> str:
    """Split pegasus-style `<n>`-delimited text into newline-separated sentences.

    Fixes two defects in the original: the `re.sub` result was discarded
    (the `<n>` markers were never actually removed), and the body referenced
    an undefined name instead of the parameter.
    """
    snake_case = re.sub("<n>", "", snake_case)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(snake_case))
| 375 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """Builds tiny OpenAI GPT configs/inputs and checks each head's output shapes.

    NOTE(review): restored from the canonical upstream tester — the original
    declared every `__init__` parameter with one duplicated mangled name (a
    SyntaxError), gave all check methods the same name (each clobbering the
    previous), and never bound `self.pad_token_id`, which
    `prepare_config_and_inputs` reads. The class name is restored because the
    test class below instantiates `OpenAIGPTModelTester`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Last vocab id doubles as the padding token for these tiny configs.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, head_mask, token_type_ids, *labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """Base model: last hidden state shape check, with/without extra inputs."""
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """LM head: scalar loss and (batch, seq, vocab) logits."""
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """Double-heads model: same loss/logits shape checks as the LM head."""
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        """Sequence classification head: (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common-test mixins: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/generation/pipeline test suite for OpenAI GPT.

    NOTE(review): restored — the original's mixin base names were undefined,
    its three class attributes and all methods shared single mangled names
    (clobbering each other), and `setUp` referenced `OpenAIGPTModelTester`
    while the tester class had a different (mangled) name.
    """

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the common inputs with multiple-choice labels for the double-heads model."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    """Integration test: greedy generation from the pretrained openai-gpt checkpoint.

    NOTE(review): restored — the original reused the mangled class name of the
    test suites above (clobbering them) and bound every local to one name, so
    `output_ids`/`expected_output_ids` in the final assertion were undefined.
    """

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding for determinism
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 84 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode an in-memory audio payload to mono float32 PCM via ffmpeg.

    NOTE(review): restored — the original declared two parameters with the
    same mangled name (SyntaxError), referenced an undefined name for both
    the command and the payload, and used the mangled `np.floataa` dtype.
    Its name is also restored because the live-microphone helpers below call
    these functions by their canonical names.

    Raises ValueError if ffmpeg is missing or the payload decodes to nothing.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # downmix to mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Yield raw audio chunks captured from the default microphone via ffmpeg.

    NOTE(review): restored — the original's duplicated parameter names were a
    SyntaxError and it called `_ffmpeg_stream`, which existed only under a
    clobbered mangled name.

    Raises ValueError for an unsupported sample format.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono capture
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    # Pick the platform-appropriate capture backend and device.
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: float = None,
    stride_length_s: float = None,
    format_for_conversion: str = "f32le",
):
    """Stream overlapping, decoded microphone chunks as numpy arrays.

    NOTE(review): restored — every local in the original was bound to one
    mangled name, so `chunk_s`, `microphone`, `dtype`, strides, and the
    per-item fields it mutated were all undefined references.

    Yields dicts with `raw` (np array), `stride` (in samples), `partial`,
    and `sampling_rate`; late chunks (more than 10 intervals behind real
    time) are dropped.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride, stream: bool = False):
    """Re-chunk a byte iterator into fixed-size chunks with left/right stride overlap.

    NOTE(review): restored — the original's duplicated parameter names were a
    SyntaxError and the accumulator/stride locals were never bound under the
    names the body used.

    Each yielded dict has `raw` and `stride`; with `stream=True` it also
    carries `partial` for not-yet-complete chunks. Raises ValueError when the
    strides leave no fresh data per chunk.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # first chunk has no left context yet
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap region for the next chunk's left context.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 2**24 # 16Mo
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=__SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
lowercase__ = ffmpeg_process.stdout.read(__SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 183 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import wiring for the ViT-MSN model package.
# NOTE(review): restored — the original bound the structure dict to a mangled
# name while `_LazyModule` received an undefined `_import_structure`, the
# modeling symbols were assigned to a throwaway name instead of being
# registered under "modeling_vit_msn", and the lazy module was never installed
# into `sys.modules`.
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
# Emit INFO-level progress messages during conversion.
logging.set_verbosity_info()
# Module-level logger (currently unused by the functions below, which print).
__lowerCAmelCase = logging.get_logger()
def _UpperCAmelCase(hidden_sizes: int, name: str, config, save_directory, push_to_hub: bool = True):
    """Port one timm LeViT checkpoint into the HF architecture and optionally save it.

    NOTE(review): parameters and locals restored from the canonical upstream
    converter — the original declared all five parameters with one duplicated
    mangled name (SyntaxError), passed an undefined name as `pretrained=`, and
    never bound `huggingface_weights`/`og_keys`/`new_keys` that the body reads.
    Weight transfer assumes timm and HF state dicts enumerate parameters in
    the same order — TODO confirm per checkpoint.
    """
    print(f"Converting {name}...")

    with torch.no_grad():
        # Select the matching timm reference model.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Copy positionally: i-th timm tensor -> i-th HF parameter name.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory, model_name: str = None, push_to_hub: bool = True):
    """Convert one named LeViT variant, or all of them when `model_name` is None.

    NOTE(review): restored — the original declared duplicate parameter names
    (SyntaxError) and bound every local to one mangled name, so
    `num_labels`, `id2label`, the config factory, and the lookup tables were
    all undefined at their points of use.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    # Build the ImageNet label maps from the hosted dataset file.
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Config factory pre-filled with the ImageNet classification head.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point for the LeViT conversion script.
    # NOTE(review): restored — the original assigned the parser and parsed
    # args to mangled throwaway names, then called `parser.add_argument`,
    # `parser.parse_args`, and `pytorch_dump_folder_path.mkdir` on names that
    # were never defined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 466 |
import math
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = [True] * n
lowercase = False
lowercase = False
lowercase = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
lowercase = i * 2
while index < n:
lowercase = False
lowercase = index + i
lowercase = [2]
for i in range(3 , __SCREAMING_SNAKE_CASE , 2 ):
if is_prime[i]:
primes.append(__SCREAMING_SNAKE_CASE )
return primes
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = 9999_6666_3333 ):
lowercase = math.floor(math.sqrt(__SCREAMING_SNAKE_CASE ) ) + 100
lowercase = prime_sieve(__SCREAMING_SNAKE_CASE )
lowercase = 0
lowercase = 0
lowercase = primes[prime_index]
while (last_prime**2) <= limit:
lowercase = primes[prime_index + 1]
lowercase = last_prime**2
lowercase = next_prime**2
# Get numbers divisible by lps(current)
lowercase = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowercase = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowercase = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowercase = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 84 | 0 |
"""Root finding by bisection."""
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of ``function`` in the interval [a, b] by bisection.

    Requires ``function(a)`` and ``function(b)`` to have opposite signs (or one
    of them to be exactly zero).  Iterates until the half-interval is below
    1e-7, so the result is accurate to roughly that tolerance.

    Raises:
        ValueError: if ``function`` has the same (non-zero) sign at both ends,
            in which case bisection cannot locate a root.
    """
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


__UpperCAmelCase = bisection  # backward-compatible alias for the original auto-generated name


def f(x: float) -> float:
    """Sample polynomial with a single real root near 2.0945514."""
    return x**3 - 2 * x - 5


__UpperCAmelCase = f  # mirrors the original shadowing of the auto-generated name


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
| 541 |
import collections
import os
import re
from pathlib import Path


# Root of the Transformers source tree this script inspects.
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the backend name(s) guarded by an `if not is_xxx_available()` line, or None.

    Multiple backends on one line are sorted and joined with "_and_" so the
    resulting key is deterministic.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


UpperCAmelCase_ = find_backend  # backward-compatible alias for the original auto-generated name
def parse_init(init_file):
    """
    Read an init file and collect, per backend, the objects declared in the
    `_import_structure` half and in the `if TYPE_CHECKING` half.

    Returns None for a traditional (non-lazy) init, otherwise a tuple
    `(import_dict_objects, type_hint_objects)` of `{backend: [object names]}`
    dicts; the key "none" holds backend-independent objects.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                # obj[1:-1] strips the surrounding quotes of each object name.
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


UpperCAmelCase_ = parse_init  # backward-compatible alias for the original auto-generated name
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Compare the two halves of a lazy init (`_import_structure` vs `TYPE_CHECKING`),
    each given as a `{backend: [object names]}` dict, and return a list of
    human-readable error strings (empty when the two halves agree).
    """

    def find_duplicates(seq):
        # Objects listed more than once within the same half.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    # Both halves must declare the exact same backends, in the same order.
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


UpperCAmelCase_ = analyze_results  # backward-compatible alias for the original auto-generated name
def check_all_inits():
    """Walk the source tree and validate every lazy `__init__.py`.

    Raises:
        ValueError: listing every init whose two halves disagree.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            # parse_init returns None for traditional (non-lazy) inits: nothing to check.
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


UpperCAmelCase_ = check_all_inits  # backward-compatible alias for the original auto-generated name
def get_transformers_submodules():
    """Return the list of Transformers submodules (package folders plus top-level .py files)."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        # Iterate over a snapshot: we prune `directories` in place below (which also
        # stops os.walk from recursing into it), and removing from the list being
        # iterated would silently skip the entry after each removal.
        for folder in list(directories):
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only direct children of the root count as submodules here.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


UpperCAmelCase_ = get_transformers_submodules  # backward-compatible alias for the original auto-generated name
# Submodules that are deliberately NOT registered in the main init
# (read by check_submodules below).
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check every submodule is registered in the main init (or explicitly ignored).

    Raises:
        ValueError: listing every unregistered submodule.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            # Typo fix in the user-facing message: "registed" -> "registered".
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


UpperCAmelCase_ = check_submodules  # backward-compatible alias for the original auto-generated name
if __name__ == "__main__":
    # Run both repository consistency checks; each raises ValueError on failure.
    check_all_inits()
    check_submodules()
| 84 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch sizes used by this example.  MAX_GPU_BATCH_SIZE is the per-device
# training cap; evaluation always runs at EVAL_BATCH_SIZE (read by
# get_dataloaders below).  NOTE(review): names restored from the accelerate
# examples this script mirrors — confirm against the upstream example.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: the `Accelerator` driving this run (used for process
            ordering and padding decisions).
        batch_size (int): per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader) tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    # EVAL_BATCH_SIZE is the module-level constant defined above.
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


_snake_case = get_dataloaders  # backward-compatible alias for the original auto-generated name
# For testing only: swap the real dataloaders for lightweight mocks so the
# example can run in CI without downloading datasets/models.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Rebind the function defined above; the noqa silences the redefinition warning.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Fine-tune BERT on MRPC with Accelerate, synchronizing via LocalSGD.

    Args:
        config (dict): hyper-parameters with keys "lr", "num_epochs", "seed",
            "batch_size".
        args: parsed CLI namespace (cpu, mixed_precision,
            gradient_accumulation_steps, local_sgd_steps).
    """
    # For testing only: shorten the run when CI mocks the dataloaders.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


_snake_case = training_function  # backward-compatible alias for the original auto-generated name
def main():
    """Parse CLI flags, assemble the hyper-parameter dict and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


_snake_case = main  # backward-compatible alias for the original auto-generated name
if __name__ == "__main__":
    # Script entry point (see `main` above).
    main()
| 23 |
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    """A single linked-list node holding one stack item."""

    def __init__(self, data: T):
        self.data = data
        # Link to the node below this one on the stack (None at the bottom).
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


A_ = Node  # backward-compatible alias for the original auto-generated class name


class Stack(Generic[T]):
    """LIFO stack backed by a singly linked list; `top` is the list head."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        # Yield items from the top of the stack down to the bottom.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        """Place a new item on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top item; raises IndexError on an empty stack."""
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the top item without removing it; raises IndexError on an empty stack."""
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every item from the stack."""
        self.top = None


A_ = Stack  # mirrors the original shadowing of the auto-generated class name


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 84 | 0 |
"""simple docstring"""
def _A ( _a : Optional[int] ):
"""simple docstring"""
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
A = f'Input value of [number={number}] must be an integer'
raise TypeError(__SCREAMING_SNAKE_CASE )
if number < 1:
A = f'Input value of [number={number}] must be > 0'
raise ValueError(__SCREAMING_SNAKE_CASE )
A = 1
for i in range(1 , __SCREAMING_SNAKE_CASE ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 617 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    """Builds a tiny Llama config plus random inputs and runs shape/consistency
    checks for the model test suite (the test class below instantiates this as
    `self.model_tester`)."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # `parent` is the unittest.TestCase driving the checks (used for assertions).
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels) with random tensors sized by the tester fields."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check that generating with a KV cache matches the no-cache forward pass."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


A_ = LlamaModelTester  # backward-compatible alias for the original auto-generated class name
@require_torch
class A_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_UpperCamelCase : List[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
_UpperCamelCase : int = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : int = False
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = LlamaModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = 'single_label_classification'
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
    """Sequence classification with problem_type='multi_label_classification'.

    NOTE(review): same obfuscation damage as the sibling tests (see above).
    Multi-label targets are float-valued, hence the ``.to(torch.float)``.
    """
    lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
    lowercase = 3  # originally config.num_labels = 3
    lowercase = 'multi_label_classification'  # originally config.problem_type
    lowercase = input_dict['input_ids']
    lowercase = input_ids.ne(1 ).to(snake_case )
    # One float label per class and example (BCE-style targets).
    lowercase = ids_tensor(
        [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
    lowercase = LlamaForSequenceClassification(snake_case )
    model.to(snake_case )
    model.eval()
    lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
    self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def SCREAMING_SNAKE_CASE__ ( self ):
    """Intentionally skipped: the RoPE buffers are complex-valued."""
    pass
@parameterized.expand([('linear',), ('dynamic',)] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
    """Compare RoPE-scaled model outputs against an unscaled baseline.

    NOTE(review): the parameter was originally ``scaling_type`` (read on the
    rope-scaling dict line below); the locals ``config``, ``original_model``,
    ``scaled_model`` and the four hidden-state tensors were all collapsed onto
    ``lowercase`` by obfuscation -- confirm against the upstream test.
    """
    lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
    lowercase = ids_tensor([1, 10] , config.vocab_size )  # short input
    # Long input: 1.5x the configured maximum position embeddings.
    lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
    set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
    lowercase = LlamaModel(snake_case )
    original_model.to(snake_case )
    original_model.eval()
    lowercase = original_model(snake_case ).last_hidden_state
    lowercase = original_model(snake_case ).last_hidden_state
    set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
    # Originally: config.rope_scaling = {"type": scaling_type, "factor": 10.0}
    lowercase = {'type': scaling_type, 'factor': 10.0}
    lowercase = LlamaModel(snake_case )
    scaled_model.to(snake_case )
    scaled_model.eval()
    lowercase = scaled_model(snake_case ).last_hidden_state
    lowercase = scaled_model(snake_case ).last_hidden_state
    # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
    # maximum sequence length, so the outputs for the short input should match.
    if scaling_type == "dynamic":
        self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
    else:
        self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
    # The output should be different for long inputs
    self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration tests against real Llama-2 checkpoints (all skipped).

    NOTE(review): obfuscation collapsed every test method onto the single name
    ``SCREAMING_SNAKE_CASE__`` -- each ``def`` rebinds that name, so only the
    last method survives on the class -- and collapsed the locals
    (``input_ids``, ``model``, ``out``, the expected tensors) onto
    ``lowercase`` while later lines still read the original names. The
    structure matches transformers' Llama integration tests; restore the
    distinct ``test_*`` names from there before re-enabling.
    """

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Llama-2 7B: check the per-position logit means and a slice of the
        # first row against reference values captured from the checkpoint.
        lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
        lowercase = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        lowercase = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
        torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        lowercase = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Llama-2 13B: same check against the 13B base checkpoint.
        lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
        lowercase = model(torch.tensor(snake_case ) )
        # Expected mean on dim = -1
        lowercase = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
        torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        lowercase = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Llama-2 13B chat: note the final line re-checks the mean rather than
        # the logits slice -- NOTE(review): likely a copy/paste slip upstream.
        lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
        lowercase = model(torch.tensor(snake_case ) )
        # Expected mean on dim = -1
        lowercase = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
        torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        lowercase = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
        # fmt: on
        torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )

    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Llama-2 70B: same check against the 70B base checkpoint.
        lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
        lowercase = model(torch.tensor(snake_case ) )
        lowercase = torch.tensor(
            [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
        # fmt: off
        lowercase = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Model is curently gated' )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Greedy-generation check: the decoded continuation must match the
        # reference text exactly.
        lowercase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        lowercase = 'Simply put, the theory of relativity states that '
        lowercase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        lowercase = tokenizer.encode(snake_case , return_tensors='pt' )
        lowercase = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=snake_case )
        # greedy generation outputs
        lowercase = model.generate(snake_case , max_new_tokens=64 , top_p=snake_case , temperature=1 , do_sample=snake_case )
        lowercase = tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case )
        self.assertEqual(snake_case , snake_case )
| 84 | 0 |
'''simple docstring'''
from __future__ import annotations
__A = 8.9_8_8E9 # units = N * m^s * C^-2
def _A ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
lowercase__ = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if distance < 0:
raise ValueError("""Distance cannot be negative""" )
if force == 0:
lowercase__ = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
lowercase__ = abs(__SCREAMING_SNAKE_CASE ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
lowercase__ = abs(__SCREAMING_SNAKE_CASE ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
lowercase__ = (COULOMBS_CONSTANT * charge_product / abs(__SCREAMING_SNAKE_CASE )) ** 0.5
return {"distance": distance}
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class A_ :
    """Mock download manager that serves local "dummy data" zips for dataset tests.

    NOTE(review): systemic obfuscation damage in this class -- the three class
    attributes below all share the name ``_UpperCamelCase`` (each assignment
    overwrites the previous), every method/property is named
    ``SCREAMING_SNAKE_CASE__`` (so only the last ``def`` survives on the
    class), ``__init__`` repeats the parameter name ``snake_case`` (a
    SyntaxError), and bodies bind results to the throwaway local ``lowercase``
    while later lines read the original attribute names (``self.dummy_file``,
    ``self.config``, ``self.dummy_data_folder`` ...). The structure matches the
    ``datasets`` library's MockDownloadManager -- restore names from there.
    """

    # Originally three distinct constants (dummy-data folder name, scripts
    # folder name, and an is-local flag) -- confirm against `datasets`.
    _UpperCamelCase : Dict = """dummy_data"""
    _UpperCamelCase : Optional[int] = """datasets"""
    _UpperCamelCase : Tuple = False

    def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = False , snake_case = True , snake_case = None , ):
        # NOTE(review): duplicated parameter names are a SyntaxError; the
        # original signature was (dataset_name, cache_dir, version_name,
        # config=None, use_local_dummy_data=False, load_existing_dummy_data=True,
        # download_callbacks=None) judging from the attribute names below.
        lowercase = 0
        lowercase = dataset_name
        lowercase = cache_dir
        lowercase = use_local_dummy_data
        lowercase = config
        # download_callbacks take a single url as input
        lowercase = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        lowercase = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        lowercase = str(snake_case )
        # to be downloaded
        lowercase = None
        lowercase = None

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Lazily download/extract the dummy data on first access (dummy_file).
        if self._dummy_file is None:
            lowercase = self.download_dummy_data()
        return self._dummy_file

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Relative folder of the dummy data inside the dataset repo
        # (originally `dummy_data_folder`).
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('dummy' , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join('dummy' , self.version_name )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Path of the dummy zip inside the dataset repo (originally `dummy_zip_file`).
        return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Fetch the dummy zip (local path or GitHub URL) and extract it,
        # returning the extracted folder (originally `download_dummy_data`).
        lowercase = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        lowercase = cached_path(
            snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
        return os.path.join(snake_case , self.dummy_file_name )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Local on-disk location of the dummy zip (originally `local_path_to_dummy_data`).
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # GitHub raw URL of the dummy zip, cached after first computation
        # (originally `github_path_to_dummy_data`).
        if self._bucket_url is None:
            lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
        return self._bucket_url

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Directory containing the dummy data (originally `manual_dir`).
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
        # Dispatch on the url argument's type to build dummy paths
        # (originally `download_and_extract(data_url, *args)`).
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            lowercase = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            lowercase = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(snake_case , snake_case ):
            return self.create_dummy_data_dict(snake_case , snake_case )
        elif isinstance(snake_case , (list, tuple) ):
            return self.create_dummy_data_list(snake_case , snake_case )
        else:
            return self.create_dummy_data_single(snake_case , snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
        # `download` delegates to download_and_extract.
        return self.download_and_extract(snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # `download_custom`-style entry point; custom handling is ignored here.
        return self.download_and_extract(snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case , **snake_case ):
        # `extract` is a no-op on already-extracted dummy data.
        return path

    def SCREAMING_SNAKE_CASE__ ( self ):
        # No recorded sizes/checksums for dummy data.
        return {}

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # Map each url in a dict to the matching file inside the dummy folder
        # (originally `create_dummy_data_dict(path_to_dummy_data, data_url)`).
        lowercase = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(snake_case , snake_case ):
                    for single_url in single_urls:
                        download_callback(snake_case )
                else:
                    lowercase = single_urls
                    download_callback(snake_case )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(snake_case , snake_case ):
                lowercase = [os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) ) for x in single_urls]
            else:
                lowercase = single_urls
                lowercase = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
            lowercase = value
        # make sure that values are unique
        if all(isinstance(snake_case , snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            lowercase = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # Map each url in a list to a file inside the dummy folder
        # (originally `create_dummy_data_list(path_to_dummy_data, data_url)`).
        lowercase = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , snake_case ) ) for url in data_url )
        lowercase = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            lowercase = [data_url[0]] * len(snake_case )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(snake_case )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            lowercase = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
            dummy_data_list.append(snake_case )
        return dummy_data_list

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # Map a single url to its dummy file
        # (originally `create_dummy_data_single(path_to_dummy_data, data_url)`).
        for download_callback in self.download_callbacks:
            download_callback(snake_case )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        lowercase = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
        if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def SCREAMING_SNAKE_CASE__ ( self ):
        # No post-download size/checksum bookkeeping for dummy data.
        pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        # No extracted-file cleanup needed for dummy data.
        pass

    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        # Yield (member_name, file_object) pairs for an archive path
        # (originally `iter_archive(path)`).
        def _iter_archive_members(snake_case ):
            # this preserves the order of the members inside the ZIP archive
            lowercase = Path(self.dummy_file ).parent
            lowercase = path.relative_to(snake_case )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                lowercase = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(snake_case )

        lowercase = Path(snake_case )
        lowercase = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob('*' )
        for file_path in file_paths:
            # Skip hidden and dunder files.
            if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
                yield file_path.relative_to(snake_case ).as_posix(), file_path.open('rb' )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        # Yield file paths under the given path(s), skipping hidden and dunder
        # entries, in sorted order (originally `iter_files(paths)`).
        if not isinstance(snake_case , snake_case ):
            lowercase = [paths]
        for path in paths:
            if os.path.isfile(snake_case ):
                if os.path.basename(snake_case ).startswith(('.', '__') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(snake_case ):
                    if os.path.basename(snake_case ).startswith(('.', '__') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(snake_case ):
                        if filename.startswith(('.', '__') ):
                            continue
                        yield os.path.join(snake_case , snake_case )
| 84 | 0 |
def miller_rabin(n, allow_probable=False):
    """Deterministic Miller-Rabin primality test.

    Deterministic for n < 3,317,044,064,679,887,385,961,981 using the known
    witness bounds; above that, pass allow_probable=True to accept a
    probabilistic answer (True then means "probable prime").

    Fixes over the obfuscated original: the def repeated one parameter name
    (a SyntaxError), referenced the undefined names `__SCREAMING_SNAKE_CASE`,
    `primes` and `plist`, and the function name is restored from its call
    sites in the self-check function below.

    Raises:
        ValueError: if n exceeds the deterministic bound and allow_probable
            is False.
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            'Warning: upper bound of deterministic test is exceeded. '
            'Pass allow_probable=True to allow probabilistic test. '
            'A return value of True indicates a probable prime.' )
    # Upper bounds for which the prefix primes[:idx] is a sufficient witness
    # set (1s are placeholders where no smaller bound exists).
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    # Default to the full witness list (covers allow_probable beyond the bounds).
    plist = primes
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    # Factor n - 1 as d * 2**s with d odd.
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # n passes this witness if m == 1 on the first round or m == n - 1
            # on any round.
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                break
        if pr:
            continue
        # This witness proves n composite.
        return False
    return True
def test_miller_rabin():
    """Self-checks for miller_rabin; each pair straddles a witness bound.

    Fixes over the obfuscated original: this def had been given the same name
    as the primality function itself (so calling it would shadow/recurse), and
    the module's __main__ guard calls `test_miller_rabin` -- the name is
    restored from that call site.
    """
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin() | 57 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for OpenAI GPT (slow and fast tokenizers).

    Fixes over the obfuscated original: the base class was the undefined name
    ``__lowerCamelCase`` (TokenizerTesterMixin is imported in this file), the
    four configuration attributes all shared the name ``_UpperCamelCase``
    (each assignment clobbering the previous), every test method was named
    ``SCREAMING_SNAKE_CASE__`` (only the last surviving), and setUp bound the
    fixture paths to locals while ``self.vocab_file`` / ``self.merges_file``
    were read. Names restored to the conventional TokenizerTesterMixin
    contract -- confirm against the upstream transformers test.
    """

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True   # also exercise the fast (Rust) tokenizer
    test_seq2seq = False         # GPT is not a seq2seq model

    def setUp(self):
        """Write a tiny BPE vocab/merges fixture into the test tmp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>',
            'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']

        # The mixin's tests read these paths from the instance.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        """Return an (input, expected output) text pair for round-trip tests."""
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        """Tokenize with the slow tokenizer and check tokens and token ids."""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        """padding='max_length' without a pad token must raise ValueError."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )

                # Pair input tests
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )

    def test_padding_different_model_input_name(self):
        """Not applicable here (tokenizer has no pad token); intentionally a no-op."""
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy ( A_ ):
    """Re-run the OpenAI GPT tokenization suite with ftfy + spacy installed.

    Fixes over the obfuscated original: the base class was the undefined name
    ``__lowerCamelCase`` (it must be the base tokenization test class defined
    above), and reusing the name ``A_`` would have shadowed that base class at
    module level, hiding its tests from discovery -- hence the distinct name.
    """

    pass
| 84 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
# Module logger. Fix: the obfuscated source bound the logger, the mapping dict
# and the key list below all to the single name `lowerCAmelCase__`, while the
# function bodies in this file reference `logger` and `MAPPING`.
logger = logging.get_logger(__name__)

# fairseq parameter-path fragment -> HF Wav2Vec2 parameter path. A '*' in the
# value is replaced with the encoder layer index at load time.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
# Keys that live at the checkpoint's top level rather than inside the encoder.
TOP_LEVEL_KEYS = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor into the HF submodule addressed by dotted `key`.

    Fixes over the obfuscated original: the def repeated one parameter name
    (a SyntaxError) and collapsed all assignment targets onto one local; the
    function name is restored from its call site in the weight-loading loop.

    Args:
        hf_pointer: root HF module to walk.
        key: dotted attribute path to the target parameter holder.
        value: tensor to copy in.
        full_name: original fairseq parameter name (for logging/asserts).
        weight_type: one of 'weight', 'weight_g', 'weight_v', 'bias' or None
            (None means `hf_pointer` itself is the parameter).
    """
    # Walk the dotted attribute path down to the target submodule/parameter.
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy every encoder weight from the fairseq wav2vec2 model into `hf_model`.

    Conv feature-extractor and adapter weights go through their dedicated
    loaders; everything else is matched through MAPPING. Unmatched weights are
    collected and reported. (Fixes over the obfuscated original: duplicated
    parameter names and collapsed locals; the name is restored from the call
    in the conversion entry point.)
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.']):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched fragment.
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor weight into the HF feature extractor.

    type_id 0 addresses the conv itself; type_id 2 addresses its layer norm
    (only layer 0 when group norm is used). Anything else is recorded as
    unused. (Fixes over the obfuscated original: duplicated parameter names
    and collapsed locals/assignment targets; name restored from its call site.)
    """
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    """Copy one adapter/projection weight into the HF adapter module.

    Non-"adaptor" names address the projection (or its layer norm when the
    name contains "proj_ln"); "adaptor.<i>..." names address the i-th adapter
    conv layer. Anything else is recorded as unused. (Fixes over the
    obfuscated original: duplicated parameter names, collapsed assignment
    targets, and the weight branch logging "bias"; name restored from its
    call site.)
    """
    name = full_name.split('adaptor.')[-1]
    items = name.split('.')
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
                adapter.proj_layer_norm.bias.data = value
                logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
                adapter.proj.bias.data = value
                logger.info(F'Adapter proj layer bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
                adapter.proj.weight.data = value
                logger.info(F'Adapter proj layer weight was initialized from {full_name}.')
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.')
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
            adapter.layers[layer_id].conv.weight.data = value
            # Fix: the original logged "bias" here for a weight tensor.
            logger.info(F'Adapter layer {layer_id} weight was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free nn.Linear whose weight is tied to the embedding `emb`.

    The returned layer shares `emb`'s weight tensor (vocab_size x emb_size),
    the usual way to tie an LM head to its token embedding. (Fixes over the
    obfuscated original: the def shared one name with five sibling functions
    and collapsed its locals; name restored per the standard HF conversion
    scripts.)
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """
    Convert a fairseq wav2vec2+mBART seq2seq checkpoint into a transformers
    ``SpeechEncoderDecoderModel`` and save model/tokenizer/feature-extractor
    to ``pytorch_dump_folder_path``.

    NOTE(review): this function must be named ``convert_wavaveca_checkpoint``
    — the ``__main__`` block below calls it by that name.
    """
    # Encoder (wav2vec2) config, with an adapter mapping the encoder hidden
    # size onto the decoder's expected input size.
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # Load the fairseq model (encoder + decoder in a single checkpoint).
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # Feature extractor matching the encoder checkpoint.
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # Port the wav2vec2 encoder weights.
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    # Port the mBART decoder weights (non-strict: some heads won't match).
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Rebuild the combined config with token ids / processor classes filled in.
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    # Forced BOS is the mBART-50 target-language id (250004); see the CLI
    # --start_token_id default below.
    config["forced_bos_token_id"] = 250_004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    # NOTE(review): argparse's type=bool treats ANY non-empty string (even
    # "False") as True; only the default is reliable for this flag.
    parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")

    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    """Register the custom markers used across the test-suite so pytest does
    not warn about (or reject, under --strict-markers) unknown markers.

    NOTE: the hook must be named exactly ``pytest_configure`` for pytest to
    invoke it.
    """
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    """Forward pytest CLI-option registration to the shared transformers helper."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """After the run, write the detailed test reports when the user passed
    ``--make-reports=<id>`` on the command line."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    """Map pytest's "no tests collected" exit code (5) to success so that an
    empty test selection does not fail CI."""
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """Doctest output checker that accepts any output for examples marked
    with the custom IGNORE_RESULT option flag."""

    def check_output(self, want, got, optionflags):
        # Skip the comparison entirely when the example opted out.
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# Patch doctest/pytest so the custom checker and the HF doctest helpers are used.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 84 | 0 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq WavLM -> transformers WavLM parameter-name mapping. A "*" stands for
# the encoder layer index and is filled in while loading.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Destination keys that live at the top level of the HF model.
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the sub-module of ``hf_pointer`` addressed by the
    dotted path ``key``.

    Args:
        hf_pointer: root HF module to descend into.
        key: dotted attribute path, e.g. ``encoder.layers.0.attention.k_proj``.
        value: tensor to copy in.
        full_name: original fairseq parameter name (for logging/errors).
        weight_type: one of "weight", "weight_g", "weight_v", "bias" or None
            (None means assign to the tensor itself).
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Sanity-check the destination shape before overwriting anything.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    """Port every tensor of the fairseq WavLM state dict into ``hf_model``,
    using MAPPING for the renames; anything unmatched is logged as unused."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Port one fairseq feature-extractor conv/layer-norm tensor into the HF
    ``feature_extractor``; unmatched names are appended to ``unused_weights``.

    fairseq conv names look like ``conv_layers.<layer_id>.<type_id>.*`` where
    type_id 0 is the conv itself and type_id 2 is its normalization layer.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert an original (unilm) WavLM checkpoint into a transformers
    ``WavLMModel`` and save it to ``pytorch_dump_folder_path``.

    NOTE(review): must be named ``convert_wavlm_checkpoint`` — the
    ``__main__`` block calls it by that name.
    """
    # load the pre-trained checkpoint into the original model implementation
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 645 |
import torch
from torch import nn
class A_(nn.Module):
    """Projected adaptive log-softmax (Transformer-XL style).

    The vocabulary is split at ``cutoffs`` into a frequent "head" shortlist
    plus rarer tail clusters; each tail token's probability is the product of
    its cluster probability (from the head) and its within-cluster
    probability. ``div_val`` shrinks the embedding size per cluster.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            # One extra head logit per tail cluster.
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Logits for one cluster, optionally through a projection matrix."""
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """With ``labels``: return per-token negative log-likelihoods (labels
        are shifted internally; -100 labels are masked in the single-cluster
        case). Without labels: return full log-probabilities per token."""
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases per cluster
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        """Full log-probability distribution over the vocabulary for each row
        of ``hidden`` (shape (batch, d_proj) -> (batch, n_token))."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases per cluster
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    # Unsqueeze so the cluster's head log-prob broadcasts over
                    # the tail vocabulary.
                    # NOTE(review): [:, -i] counts cluster columns from the
                    # end; for >1 cluster confirm this ordering matches the
                    # cluster_prob_idx convention used in forward().
                    logprob_i = head_logprob[:, -i, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
| 84 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCAmelCase_(unittest.TestCase):
    """Unit tests for the ``DisjunctiveConstraint`` generation constraint.

    NOTE(review): the method names must start with ``test_`` or unittest
    will never run them.
    """

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 658 |
from __future__ import annotations
class Matrix:
    """A mutable 2-D matrix of ints/floats with determinant, minors,
    cofactors, adjugate, inverse and basic arithmetic.

    NOTE: scalar multiplication truncates each element with ``int()``, so
    ``inverse()`` returns truncated integer entries (historical behavior).
    """

    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        """Return the columns as a list of lists (the transpose's rows)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Identity matrix of the same row count."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        """Determinant (0 for non-square matrices; Laplace expansion)."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the sub-matrix with the given row/column removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row, position=None):
        """Append ``row`` (or insert at ``position``); validates type/length."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        """Append ``column`` (or insert at ``position``); validates type/length."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            # NOTE: int() truncation is intentional historical behavior.
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))


# Backwards-compatible alias for the original (obfuscated) class name.
A_ = Matrix
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 84 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration file.
# Bug fix: both module variables were named SCREAMING_SNAKE_CASE_ (the map
# shadowed the logger) and the `List[Any]` annotation raised NameError because
# `typing` is not imported at module level here.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file.
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class snake_case_(PretrainedConfig):
    """Configuration for the Audio Spectrogram Transformer (AST) model.

    Bug fixes: the base class was the undefined name `__lowerCamelCase`
    (PretrainedConfig is what this file imports), every __init__ parameter was
    named `__lowerCamelCase` (duplicate argument names are a SyntaxError), and
    the `model_type` attribute was name-mangled. Parameter names are restored
    from the attribute assignments below.
    """

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 375 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# Bug fix: the logger and the example docstring were both assigned to the
# single name `UpperCAmelCase`, so the logger was immediately shadowed.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example for the pipeline's __call__ docstring.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds
        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    """Round height/width up to the nearest multiple of scale_factor**2, then
    divide by scale_factor (latent-space size for the movq decoder).

    Bug fixes: all three parameters shared the name `__SCREAMING_SNAKE_CASE`
    (duplicate argument names are a SyntaxError), and `new_height`/`new_width`
    were incremented before being assigned. Renamed to
    `downscale_height_and_width`, the name the pipeline below calls.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class A_(DiffusionPipeline):
    """Text-to-image decoder pipeline for Kandinsky 2.2 (unet + movq).

    Bug fixes relative to the degraded original: duplicate `snake_case`
    parameter names (SyntaxError), methods restored to the names the class
    itself calls (`prepare_latents`, `_execution_device`), and
    `movq_scale_factor` actually stored on the instance instead of a local.
    """

    def __init__(self, unet, scheduler, movq):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        # Latent down-scaling factor implied by the movq encoder depth;
        # read by __call__ via downscale_height_and_width.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Create (or validate) the initial latents and scale them for the scheduler."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload unet and movq to CPU, moving them to GPU only when used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Whole-model CPU offload with hooks (requires accelerate >= 0.17)."""
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the unet actually executes on (accounts for offload hooks)."""
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        """Run the denoising loop conditioned on (negative) image embeddings
        and decode the result with movq. See EXAMPLE_DOC_STRING for usage."""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latents
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype, device, generator, latents, self.scheduler)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 84 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch ops deterministic so the pixel-checksum assertions are stable.
enable_full_determinism()
class _UpperCAmelCase(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for LDMTextToImagePipeline using tiny random components.

    Bug fixes: all class attributes and all three methods shared the single
    name `lowercase__` (each shadowing the previous), method parameters were
    duplicated (`lowerCamelCase, lowerCamelCase` — SyntaxError), and the base
    class was the undefined `__lowerCamelCase`. Names restored to what the
    tester mixin and the class's own calls (`self.get_dummy_components`,
    `self.get_dummy_inputs`) require.
    """

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build seeded, minimal pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D'''), up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D'''), latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vqvae''': vae,
            '''bert''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on the given device."""
        if str(device).startswith('''mps'''):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_inference_text2img(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    """Slow GPU tests against the real CompVis/ldm-text2im-large-256 checkpoint.

    Bug fixes: the class reused the previous class's name (shadowing it), all
    methods shared one name, `get_inputs` had three parameters all called
    `lowerCamelCase` (SyntaxError), the dtype default was the typo
    `torch.floataa`, and the test method lacked the `test_` prefix unittest
    needs to discover it.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic inputs with pre-sampled latents."""
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''').to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1E-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    """Nightly full-quality (50-step) run compared against a stored array.

    Bug fixes: class/method name shadowing, duplicate `lowerCamelCase`
    parameters (SyntaxError), the `torch.floataa` typo, and the missing
    `test_` prefix — same defects as the slow test class above.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic inputs with pre-sampled latents."""
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 50,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''').to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''')
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1E-3
| 183 |
def decimal_isolate(number, digit_amount):
    """Isolate the fractional part of *number*, rounded to *digit_amount*
    decimal places when digit_amount > 0 (unrounded otherwise).

    Bug fixes: both parameters were named `__SCREAMING_SNAKE_CASE` (duplicate
    argument names are a SyntaxError) and the function was misnamed — the
    __main__ block below calls `decimal_isolate`.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
# Demonstration of decimal_isolate on positive, negative, and zero inputs.
if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 84 | 0 |
'''simple docstring'''
def _UpperCAmelCase ( __A : List[str] ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError('''only integers accepted as input''' )
else:
a_ : str = str(abs(__SCREAMING_SNAKE_CASE ) )
a_ : List[Any] = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )]
for index in range(len(__SCREAMING_SNAKE_CASE ) ):
num_transpositions[index].pop(__SCREAMING_SNAKE_CASE )
return max(
int(''''''.join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions )
# Run any embedded doctests when executed directly.
if __name__ == "__main__":
    __import__('doctest').testmod()
| 466 |
from __future__ import annotations
def is_palindrome(n) -> bool:
    """Return True when the decimal representation of *n* reads the same
    forwards and backwards.

    Bug fix: both functions in this file were named `UpperCAmelCase_`, so the
    second definition shadowed this one and its call to `is_palindrome`
    raised NameError. Names restored to what the callers use.
    """
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 100_0000) -> int:
    """Project Euler 36: sum of numbers below *limit* that are palindromic
    in both base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        # bin(i) is '0b...'; strip the prefix before the palindrome check.
        if is_palindrome(i) and is_palindrome(bin(i).split('b')[1]):
            total += i
    return total
# Read an upper bound from stdin and print the double-base palindrome sum.
if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
| 84 | 0 |
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
# Bug fix: the amp-availability flag and the logger were both assigned to the
# single name `_UpperCamelCase`, clobbering the flag; later code reads `logger`.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """CLI arguments selecting the model and its pretraining hyperparameters.

    Bug fix: the class was named `snake_case__` (colliding with three other
    classes in this file) and every field was named `a_`; names restored from
    the attributes `main()` reads (model_name_or_path, cache_dir, ...).
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    # NOTE(review): default restored as True per the upstream script — confirm.
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."}, )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."})
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."})
    gumbel_temperature_decay: Optional[float] = field(
        default=0.99_99_95, metadata={"help": "Decay of gumbel temperature during training."})
def configure_logger(model_args, training_args) -> None:
    """Set up stdout logging; DEBUG when --verbose_logging, INFO on the main
    process, WARNING otherwise.

    Bug fixes: both parameters were named `A` (duplicate argument names are a
    SyntaxError) and the function was misnamed — main() calls
    `configure_logger`.
    """
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout)], )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """CLI arguments describing the dataset and preprocessing.

    Bug fix: class and field names were degraded to `snake_case__` / `a_`;
    restored from the attributes `main()` reads (dataset_name,
    speech_file_column, max_duration_in_seconds, ...).
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_split_name: Optional[str] = field(
        default="train", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    validation_split_name: Optional[str] = field(
        default="validation", metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        }, )
    speech_file_column: Optional[str] = field(
        default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"})
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Pad a batch of raw-audio features and sample masked time indices for
    wav2vec2 pretraining.

    Bug fixes: field names were degraded to `a_` placeholders (`a_ = 42` for
    the two required fields) and the mask construction lost its indexing
    target; restored from the attribute reads in __call__.
    """

    model: "WavaVecaForPreTraining"
    feature_extractor: "WavaVecaFeatureExtractor"
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features):
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='''pt''', )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1])
        batch_size = batch['''input_values'''].shape[0]
        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['''input_values'''].device)
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=batch['''input_values'''].device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )
        return batch
class WavaVecaPreTrainer(Trainer):
    """Trainer that decays the gumbel-softmax temperature every update step.

    Bug fixes: class renamed from `snake_case__` (main() instantiates
    `WavaVecaPreTrainer`), base restored to the imported Trainer, the
    overridden method restored to Trainer's `training_step` hook, and the
    duplicate `_A` parameter names (SyntaxError) replaced.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        """One forward/backward pass; returns the detached loss."""
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['''mask_time_indices''']).sum()
            else:
                raise ValueError(F"{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        gumbel_temperature = max(
            self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(gumbel_temperature)
        else:
            model.set_gumbel_temperature(gumbel_temperature)
        return loss.detach()
def main():
    """Parse CLI args, build datasets/model/collator, and run pretraining.

    Bug fix: the function was named `__UpperCAmelCase` while the guard below
    calls `main()`, and every local was degraded to `UpperCAmelCase_` while
    the body read the real names (`parser`, `datasets`, ...).
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split='''validation''', cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=F"{data_args.train_split_name}", cache_dir=model_args.cache_dir, )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)

    def prepare_dataset(batch):
        # Load one audio file, resampled to the extractor's sampling rate.
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets['''train'''].column_names)

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data['''speech''']) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch['''speech'''], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets['''train'''].column_names, )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
            ''' ``config.feat_extract_norm=\'layer\'''')

    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets['''train'''], eval_dataset=vectorized_datasets['''validation'''], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, )
    trainer.train()


if __name__ == "__main__":
    main()
| 541 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Bug fix: the logger and the checkpoint map were both assigned to the single
# name `UpperCAmelCase`, so the logger the config class uses (`logger.info`)
# was shadowed and undefined under its real name.
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class A_(PretrainedConfig):
    """Configuration for the Conditional DETR model.

    Bug fixes: base class restored to PretrainedConfig (was the undefined
    `__lowerCamelCase`), every __init__ parameter was named `snake_case`
    (duplicate argument names are a SyntaxError — restored from the attribute
    assignments), the three class attributes PretrainedConfig requires were
    degraded to `_UpperCamelCase`, and the three accessor methods all shared
    one name.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        return self.d_model

    def to_dict(self):
        """Serialize to a dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR.

    Bug fixes: the class reused the name `A_` (shadowing the model config
    above), the base was the undefined `__lowerCamelCase` (OnnxConfig is the
    imported base), and all three properties shared one mangled name.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ])

    @property
    def atol_for_validation(self):
        return 1E-5

    @property
    def default_onnx_opset(self):
        return 12
| 84 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch ops deterministic so the pixel-checksum assertions are stable.
enable_full_determinism()
class _a(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast tests for CycleDiffusionPipeline.

    Bug fixes: the two mixin bases were the undefined name `__lowerCamelCase`
    (restored from this file's imports) and all six class attributes were
    assigned to the single name `A_`, each shadowing the previous; names
    restored to the attributes the tester mixins read.
    """

    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _UpperCAmelCase ( self ) -> int:
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCamelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCamelCase_ = CLIPTextModel(_UpperCAmelCase )
UpperCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> Optional[int]:
UpperCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
UpperCamelCase_ = image / 2 + 0.5
if str(_UpperCAmelCase ).startswith('mps' ):
UpperCamelCase_ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCamelCase_ = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = CycleDiffusionPipeline(**_UpperCAmelCase )
UpperCamelCase_ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs(_UpperCAmelCase )
UpperCamelCase_ = pipe(**_UpperCAmelCase )
UpperCamelCase_ = output.images
UpperCamelCase_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
UpperCamelCase_ = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.get_dummy_components()
for name, module in components.items():
if hasattr(_UpperCAmelCase , 'half' ):
UpperCamelCase_ = module.half()
UpperCamelCase_ = CycleDiffusionPipeline(**_UpperCAmelCase )
UpperCamelCase_ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs(_UpperCAmelCase )
UpperCamelCase_ = pipe(**_UpperCAmelCase )
UpperCamelCase_ = output.images
UpperCamelCase_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
UpperCamelCase_ = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _UpperCAmelCase ( self ) -> str:
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
return super().test_inference_batch_single_identical()
@skip_mps
def _UpperCAmelCase ( self ) -> Any:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _UpperCAmelCase ( self ) -> Optional[int]:
return super().test_save_load_optional_components()
@skip_mps
def _UpperCAmelCase ( self ) -> Any:
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _a(unittest.TestCase):
    """Slow GPU integration tests comparing CycleDiffusion against reference images.

    NOTE(review): local/method names reconstructed from their uses (the mangled
    source never bound `image`/`expected_image` etc., raising NameError).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 23 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Mapping of submodule name -> public symbols; populated only when the
# optional sentencepiece dependency is installed.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    # Replace this module with a lazy proxy so the heavy tokenizer import
    # only happens on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 0 |
"""simple docstring"""
from __future__ import annotations
def _A ( _a : Any , _a : Tuple ):
"""simple docstring"""
if len(__SCREAMING_SNAKE_CASE ) <= 1 or n <= 1:
return
insert_next(__SCREAMING_SNAKE_CASE , n - 1 )
rec_insertion_sort(__SCREAMING_SNAKE_CASE , n - 1 )
def _A ( _a : Optional[Any] , _a : List[Any] ):
"""simple docstring"""
if index >= len(__SCREAMING_SNAKE_CASE ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
A , A = (
collection[index],
collection[index - 1],
)
insert_next(__SCREAMING_SNAKE_CASE , index + 1 )
if __name__ == "__main__":
UpperCAmelCase =input("Enter integers separated by spaces: ")
UpperCAmelCase =[int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 617 |
def topological_sort(graph):
    """Print a topological ordering of *graph* using Kahn's algorithm.

    *graph* is an adjacency-list dict mapping vertex -> list of successors,
    with vertices numbered ``0 .. len(graph) - 1``. Prints ``'Cycle exists'``
    instead when the graph is not a DAG.
    """
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0  # vertices emitted; fewer than len(graph) means a cycle remains

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with every vertex that has no incoming edge.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 84 | 0 |
"""Random password generation helpers."""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Return a random password of *length* drawn from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Build a password of total length *i* guaranteed to contain *chars_incl*.

    The remaining length is split roughly evenly between letters, digits and
    punctuation, then the result is shuffled.
    """
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return *i* characters chosen from *chars_incl* with a CSPRNG."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Return True when *password* mixes upper/lower case, digits and specials."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    # Passwords should contain UPPERCASE, lowerase
    # numbers, and special characters
    return upper and lower and num and spec_char


def main():
    """Interactive driver: ask for a length and required chars, print passwords."""
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
| 325 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Mapping of submodule name -> public symbols; optional-backend entries are
# appended below only when the backend is installed.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so backend-heavy submodules are
    # imported only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tests for Longformer's byte-level BPE tokenizers (Roberta-style).

    NOTE(review): method and local names reconstructed from their call sites —
    the mangled source bound everything to duplicate placeholder names and
    raised NameError at runtime; booleans that the mangling erased were
    restored to match the upstream transformers Longformer/Roberta tokenizer
    test and should be double-checked against it.
    """

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        # Not prefixed with `test_`, so not collected by the runner (kept as in
        # the upstream test file).
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of the Flax ControlNet forward pass.

    NOTE(review): field names reconstructed from the keyword arguments used at
    the model's return statement in this file.
    """

    # Per-resolution residuals from the down blocks.
    down_block_res_samples: jnp.ndarray
    # Residual produced after the middle block.
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    """Embeds the full-resolution conditioning image down to the UNet's
    latent resolution via a stack of strided convolutions.

    NOTE(review): attribute/local names reconstructed from the reads in
    ``__call__`` and from the instantiation site later in this file.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            # Keep resolution at channel_in, then halve it while widening.
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        # Zero-initialized so the ControlNet starts as a no-op.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class A_ ( nn.Module , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : int = 32
_UpperCamelCase : int = 4
_UpperCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase : Union[bool, Tuple[bool]] = False
_UpperCamelCase : Tuple[int] = (320, 640, 1280, 1280)
_UpperCamelCase : int = 2
_UpperCamelCase : Union[int, Tuple[int]] = 8
_UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCamelCase : int = 1280
_UpperCamelCase : float = 0.0
_UpperCamelCase : bool = False
_UpperCamelCase : jnp.dtype = jnp.floataa
_UpperCamelCase : bool = True
_UpperCamelCase : int = 0
_UpperCamelCase : str = "rgb"
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# init input tensors
lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase = jnp.ones((1,) , dtype=jnp.intaa )
lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase , lowercase = jax.random.split(snake_case )
lowercase = {'params': params_rng, 'dropout': dropout_rng}
return self.init(snake_case , snake_case , snake_case , snake_case , snake_case )["params"]
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.block_out_channels
lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase = self.num_attention_heads or self.attention_head_dim
# input
lowercase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase = FlaxTimestepEmbedding(snake_case , dtype=self.dtype )
lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase = self.only_cross_attention
if isinstance(snake_case , snake_case ):
lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case , snake_case ):
lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase = []
lowercase = []
lowercase = block_out_channels[0]
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
for i, down_block_type in enumerate(self.down_block_types ):
lowercase = output_channel
lowercase = block_out_channels[i]
lowercase = i == len(snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase = FlaxCrossAttnDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase = FlaxDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case )
for _ in range(self.layers_per_block ):
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
if not is_final_block:
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
lowercase = down_blocks
lowercase = controlnet_down_blocks
# mid
lowercase = block_out_channels[-1]
lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=snake_case , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
# NOTE(review): identifiers in this method look machine-mangled — every parameter is
# named `snake_case` (duplicate parameter names are a SyntaxError in Python) and every
# local is bound to `lowercase`, while later statements read the original names
# (`channel_order`, `timesteps`, `sample`, ...). `jnp.intaa`/`jnp.floataa` are
# presumably garbled `jnp.int32`/`jnp.float32` — TODO confirm against upstream.
# Comments below describe the intended flow only.
def __call__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1.0 , snake_case = True , snake_case = False , ):
    """ControlNet forward pass: embed the timestep, add the embedded conditioning
    image onto the sample, run the down/mid blocks, project every residual through
    the zero-initialized controlnet convolutions, and return the scaled residuals."""
    # The conditioning image may arrive in BGR order; flip channel axis 1 if so.
    lowercase = self.controlnet_conditioning_channel_order
    if channel_order == "bgr":
        lowercase = jnp.flip(snake_case , axis=1 )
    # 1. time: promote a Python scalar / 0-d array timestep to a 1-D jnp array.
    if not isinstance(snake_case , jnp.ndarray ):
        lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
    elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
        lowercase = timesteps.astype(dtype=jnp.floataa )
        lowercase = jnp.expand_dims(snake_case , 0 )
    lowercase = self.time_proj(snake_case )
    lowercase = self.time_embedding(snake_case )
    # 2. pre-process: Flax convolutions expect NHWC, so transpose from NCHW
    # before conv_in, then add the embedded conditioning image to the sample.
    lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
    lowercase = self.conv_in(snake_case )
    lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
    lowercase = self.controlnet_cond_embedding(snake_case )
    sample += controlnet_cond
    # 3. down: run each down block, accumulating its residual samples.
    lowercase = (sample,)
    for down_block in self.down_blocks:
        if isinstance(snake_case , snake_case ):
            # Cross-attention blocks additionally consume the encoder hidden states.
            lowercase , lowercase = down_block(snake_case , snake_case , snake_case , deterministic=not train )
        else:
            lowercase , lowercase = down_block(snake_case , snake_case , deterministic=not train )
        down_block_res_samples += res_samples
    # 4. mid
    lowercase = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train )
    # 5. controlnet blocks: project each down residual through its zero-conv.
    lowercase = ()
    for down_block_res_sample, controlnet_block in zip(snake_case , self.controlnet_down_blocks ):
        lowercase = controlnet_block(snake_case )
        controlnet_down_block_res_samples += (down_block_res_sample,)
    lowercase = controlnet_down_block_res_samples
    lowercase = self.controlnet_mid_block(snake_case )
    # 6. scaling: weight all residuals by the conditioning scale.
    lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
    mid_block_res_sample *= conditioning_scale
    if not return_dict:
        return (down_block_res_samples, mid_block_res_sample)
    return FlaxControlNetOutput(
        down_block_res_samples=snake_case , mid_block_res_sample=snake_case )
| 84 | 0 |
"""simple docstring"""
def _lowerCamelCase ( __a, __a ):
return number | (1 << position)
def _lowerCamelCase ( __a, __a ):
return number & ~(1 << position)
def _lowerCamelCase ( __a, __a ):
return number ^ (1 << position)
def _lowerCamelCase ( __a, __a ):
return ((number >> position) & 1) == 1
def _lowerCamelCase ( __a, __a ):
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 626 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): this whole section (an `accelerate` gather_for_metrics test script)
# looks machine-obfuscated: every function is named `UpperCAmelCase_` (later defs
# shadow earlier ones), all parameters are `__SCREAMING_SNAKE_CASE` (duplicate
# parameter names are a SyntaxError), and every local is bound to `lowercase`
# while the bodies still read the original names (`model`, `accelerator`,
# `dataloader`, ...). The call sites reference `get_basic_setup`, `get_dataloader`,
# `get_mrpc_setup`, `generate_predictions`, `test_mrpc`, `test_torch_metrics` and
# `main`, none of which exist under those names here. Comments document the
# intended behaviour; identifiers need restoring before any of this can run.
UpperCAmelCase = '''true'''  # presumably an env-var value set at import time — TODO confirm against upstream


def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=16 ):
    # Basic setup: a regression model plus a deep copy to be DDP-wrapped, and a
    # dataloader of the requested length, all prepared through the accelerator.
    set_seed(42 )
    lowercase = RegressionModel()
    lowercase = deepcopy(__SCREAMING_SNAKE_CASE )
    lowercase = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
    lowercase = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
    model.to(accelerator.device )
    lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    return model, ddp_model, dataloader


def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
    # Build a tokenized GLUE/MRPC validation dataloader; padding strategy is
    # either dynamic ("longest") or fixed max_length=128 depending on the flag.
    lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
    lowercase = load_dataset('glue' , 'mrpc' , split='validation' )

    def tokenize_function(__SCREAMING_SNAKE_CASE ):
        lowercase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
        return outputs

    # main_process_first so the dataset cache is written once, not per process.
    with accelerator.main_process_first():
        lowercase = dataset.map(
            __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    lowercase = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(__SCREAMING_SNAKE_CASE ):
        if use_longest:
            return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )
        return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' )

    return DataLoader(__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=16 )


def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    # MRPC setup: returns both a plain ("no") and an accelerator-prepared ("ddp")
    # model/dataloader pair, keyed by mode, plus the accelerator itself.
    lowercase = Accelerator(dispatch_batches=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE )
    lowercase = get_dataloader(__SCREAMING_SNAKE_CASE , not dispatch_batches )
    lowercase = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased' , return_dict=__SCREAMING_SNAKE_CASE )
    lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    # Run inference over the dataloader, gathering (logit, target) pairs across
    # processes with gather_for_metrics, then concatenate into two tensors.
    lowercase = []
    for batch in dataloader:
        lowercase , lowercase = batch.values()
        with torch.no_grad():
            lowercase = model(__SCREAMING_SNAKE_CASE )
        lowercase , lowercase = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    lowercase , lowercase = [], []
    for logit, targ in logits_and_targets:
        logits.append(__SCREAMING_SNAKE_CASE )
        targs.append(__SCREAMING_SNAKE_CASE )
    lowercase , lowercase = torch.cat(__SCREAMING_SNAKE_CASE ), torch.cat(__SCREAMING_SNAKE_CASE )
    return logits, targs


def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=16 ):
    # Checks gather_for_metrics returns exactly num_samples items (no duplicates
    # from distributed padding).
    lowercase , lowercase , lowercase = get_basic_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    lowercase , lowercase = generate_predictions(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    assert (
        len(__SCREAMING_SNAKE_CASE ) == num_samples
    ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__SCREAMING_SNAKE_CASE )}'''


def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False ):
    # Compares MRPC metrics computed single-process ("no") vs distributed ("ddp");
    # the two must agree for accuracy and f1.
    lowercase = evaluate.load('glue' , 'mrpc' )
    lowercase , lowercase = get_mrpc_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    # First do baseline
    lowercase , lowercase , lowercase = setup['no']
    model.to(__SCREAMING_SNAKE_CASE )
    model.eval()
    for batch in dataloader:
        batch.to(__SCREAMING_SNAKE_CASE )
        with torch.inference_mode():
            lowercase = model(**__SCREAMING_SNAKE_CASE )
        lowercase = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=batch['labels'] )
    lowercase = metric.compute()
    # Then do distributed
    lowercase , lowercase , lowercase = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            lowercase = model(**__SCREAMING_SNAKE_CASE )
        lowercase = outputs.logits.argmax(dim=-1 )
        lowercase = batch['labels']
        lowercase , lowercase = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )
    lowercase = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''


def UpperCAmelCase_ ( ):
    # Entry point: sweep split_batches/dispatch_batches combinations for both
    # the MRPC metric test and the plain torch-metrics test.
    lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**' )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
                test_mrpc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**' )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
            test_torch_metrics(__SCREAMING_SNAKE_CASE , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**' )
    lowercase = Accelerator()
    test_torch_metrics(__SCREAMING_SNAKE_CASE , 512 )
    accelerator.state._reset_state()


def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 84 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): identifiers in this class look machine-mangled — the class and all
# attributes/properties carry placeholder names (`a`, `lowercase__`, every property
# redefines the same name), `__init__` reads `kwargs` but its parameter is
# `**snake_case__`, and the class attribute list is read back as
# `self.deprecated_args`. Comments describe the intended TensorFlow benchmark
# arguments class; identifiers need restoring before it can run.
@dataclass
class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase ):
    """TensorFlow-specific benchmark arguments: TPU resolution, device selection,
    eager/XLA toggles, plus backward-compat handling of deprecated `no_*` flags."""

    # Deprecated boolean flags formerly spelled `--no_*`; rewritten in __init__.
    a : Dict =[
        """no_inference""",
        """no_cuda""",
        """no_tpu""",
        """no_speed""",
        """no_memory""",
        """no_env_print""",
        """no_multi_process""",
    ]

    def __init__( self , **snake_case__ ):
        """Translate each deprecated `no_X` kwarg into its positive counterpart
        (with a deprecation warning), then pop the TF-specific kwargs before
        delegating the rest to the base BenchmarkArguments."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                lowerCAmelCase : int = deprecated_arg[3:]
                lowerCAmelCase : List[str] = not kwargs.pop(snake_case__ )
                logger.warning(
                    f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        lowerCAmelCase : str = kwargs.pop("tpu_name" , self.tpu_name )
        lowerCAmelCase : Union[str, Any] = kwargs.pop("device_idx" , self.device_idx )
        lowerCAmelCase : List[str] = kwargs.pop("eager_mode" , self.eager_mode )
        lowerCAmelCase : Tuple = kwargs.pop("use_xla" , self.use_xla )
        super().__init__(**snake_case__ )

    # Dataclass fields (presumably tpu_name / device_idx / eager_mode / use_xla —
    # the mangled names above all collapse to `a`; TODO confirm against upstream).
    a : str =field(
        default=__lowerCamelCase , metadata={"help": "Name of TPU"} , )
    a : int =field(
        default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
    a : bool =field(default=__lowerCamelCase , metadata={"help": "Benchmark models in eager model."} )
    a : bool =field(
        default=__lowerCamelCase , metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        } , )

    @cached_property
    def lowercase__ ( self ):
        """Resolve a TPU cluster (by name if given), or None when unreachable."""
        requires_backends(self , ["tf"] )
        lowerCAmelCase : List[str] = None
        if self.tpu:
            try:
                if self.tpu_name:
                    lowerCAmelCase : str = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    lowerCAmelCase : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                lowerCAmelCase : List[Any] = None
        return tpu

    @cached_property
    def lowercase__ ( self ):
        """Build the tf.distribute strategy: TPUStrategy when a TPU resolved,
        otherwise a OneDeviceStrategy pinned to the selected GPU or the CPU."""
        requires_backends(self , ["tf"] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            lowerCAmelCase : int = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
                lowerCAmelCase : Tuple = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
            else:
                tf.config.set_visible_devices([] , "GPU" )  # disable GPU
                lowerCAmelCase : Optional[Any] = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
        return strategy

    @property
    def lowercase__ ( self ):
        """True when a TPU cluster was resolved."""
        requires_backends(self , ["tf"] )
        return self._setup_tpu is not None

    @property
    def lowercase__ ( self ):
        """The lazily-built distribution strategy."""
        requires_backends(self , ["tf"] )
        return self._setup_strategy

    @property
    def lowercase__ ( self ):
        """All physical GPU devices visible to TensorFlow."""
        requires_backends(self , ["tf"] )
        return tf.config.list_physical_devices("GPU" )

    @property
    def lowercase__ ( self ):
        """Number of GPUs, or 0 when CUDA is disabled."""
        requires_backends(self , ["tf"] )
        if self.cuda:
            return len(self.gpu_list )
        return 0

    @property
    def lowercase__ ( self ):
        """True when at least one GPU is available."""
        return self.n_gpu > 0
| 645 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
# NOTE(review): identifiers in this class look machine-mangled — every parameter is
# `snake_case` (duplicate parameter names are a SyntaxError), every local is
# `lowercase`, and the bodies read the original names (`image_processor`,
# `tokenizer`, `encodings`, ...). Comments describe the intended OwlViT processor
# behaviour; identifiers need restoring before it can run.
class A_ ( __lowerCamelCase ):
    '''Processor bundling an OwlViT image processor and a CLIP tokenizer: tokenizes
    (possibly nested) text queries, preprocesses images and query images, and
    exposes the image processor's post-processing helpers.'''

    _UpperCamelCase : List[str] = ["""image_processor""", """tokenizer"""]
    _UpperCamelCase : Any = """OwlViTImageProcessor"""
    _UpperCamelCase : Dict = ("""CLIPTokenizer""", """CLIPTokenizerFast""")

    def __init__( self , snake_case=None , snake_case=None , **snake_case ):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, then validate both components are present.
        lowercase = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , snake_case , )
            lowercase = kwargs.pop('feature_extractor' )
        lowercase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(snake_case , snake_case )

    def __call__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="max_length" , snake_case="np" , **snake_case ):
        # At least one of text / query_images / images must be supplied.
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.' )
        if text is not None:
            # A single string or a flat list of strings: one encoding per call.
            if isinstance(snake_case , snake_case ) or (isinstance(snake_case , snake_case ) and not isinstance(text[0] , snake_case )):
                lowercase = [self.tokenizer(snake_case , padding=snake_case , return_tensors=snake_case , **snake_case )]
            elif isinstance(snake_case , snake_case ) and isinstance(text[0] , snake_case ):
                lowercase = []
                # Maximum number of queries across batch
                lowercase = max([len(snake_case ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(snake_case ) != max_num_queries:
                        lowercase = t + [' '] * (max_num_queries - len(snake_case ))
                    lowercase = self.tokenizer(snake_case , padding=snake_case , return_tensors=snake_case , **snake_case )
                    encodings.append(snake_case )
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
            # Stack the per-sample encodings with the requested tensor backend.
            if return_tensors == "np":
                lowercase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                lowercase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                lowercase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                lowercase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch

                lowercase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
                lowercase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                lowercase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
                lowercase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('Target return tensor type could not be returned' )
            lowercase = BatchEncoding()
            lowercase = input_ids
            lowercase = attention_mask
        if query_images is not None:
            lowercase = BatchEncoding()
            lowercase = self.image_processor(
                snake_case , return_tensors=snake_case , **snake_case ).pixel_values
            lowercase = query_pixel_values
        if images is not None:
            lowercase = self.image_processor(snake_case , return_tensors=snake_case , **snake_case )
        # Merge pixel values into the encoding depending on which inputs were given.
        if text is not None and images is not None:
            lowercase = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            lowercase = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**snake_case ) , tensor_type=snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
        # Delegate to the image processor's generic post-processing.
        return self.image_processor.post_process(*snake_case , **snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
        # Delegate object-detection post-processing.
        return self.image_processor.post_process_object_detection(*snake_case , **snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
        # Delegate image-guided detection post-processing.
        return self.image_processor.post_process_image_guided_detection(*snake_case , **snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*snake_case , **snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
        # Delegate decoding to the tokenizer.
        return self.tokenizer.decode(*snake_case , **snake_case )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Deprecated alias for image_processor_class.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case , )
        return self.image_processor_class

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Deprecated alias for image_processor.
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case , )
        return self.image_processor
| 84 | 0 |
from collections import namedtuple
__snake_case = namedtuple("""from_to""", """from_ to""")
__snake_case = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.0_0_1, 10_00),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
"""cubicyard""": from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
"""cubicfoot""": from_to(0.0_2_8, 3_5.3_1_4_7),
"""cup""": from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ''', '''.join(__SCREAMING_SNAKE_CASE ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ''', '''.join(__SCREAMING_SNAKE_CASE ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

# Restored canonical constant names: every assignment below had been mangled to
# `UpperCAmelCase`, so only the last binding survived while the tokenizer class
# in this module reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES (NameError at class-creation time).
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum positional-embedding length per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
# NOTE(review): identifiers in this class look machine-mangled — the __init__
# parameters are all `snake_case` (duplicate parameter names are a SyntaxError)
# and locals are `lowercase`, while the bodies read the original names
# (`add_prefix_space`, `token_ids_a`, `output`, ...). Comments describe the
# intended behaviour; identifiers need restoring before it can run.
class A_ ( __lowerCamelCase ):
    '''Fast (tokenizers-backed) BlenderbotSmall tokenizer built on a byte-level
    BPE model, with the slow BlenderbotSmallTokenizer as its counterpart.'''

    _UpperCamelCase : Dict = VOCAB_FILES_NAMES
    _UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase : str = BlenderbotSmallTokenizer

    def __init__( self , snake_case=None , snake_case=None , snake_case="<|endoftext|>" , snake_case="<|endoftext|>" , snake_case="<|endoftext|>" , snake_case=False , snake_case=True , **snake_case , ):
        # Build the underlying byte-level BPE tokenizer and hand it to the fast
        # tokenizer base class along with the special tokens.
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=snake_case , merges=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , ) , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , **snake_case , )
        lowercase = add_prefix_space

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None ):
        # Wrap one sequence (or a pair) with BOS/EOS special tokens:
        # <bos> A <eos> [<eos> B <eos>].
        lowercase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
        # Token-type ids: this model does not distinguish segments, so the mask
        # is all zeros with a length matching the special-token layout.
        lowercase = [self.sep_token_id]
        lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 84 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import scaffold for the Reformer package: submodule contents are
# registered in `_import_structure` and only imported on first attribute access.
#
# Fixes to the mangled original: every assignment was bound to
# `SCREAMING_SNAKE_CASE_` (plain lists overwriting the dict instead of adding
# keyed entries), `_LazyModule` was called with an undefined `_import_structure`
# (NameError at import time), and its result was stored in a throwaway variable
# instead of being installed into `sys.modules`.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: slow tokenizer simply not registered.
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # tokenizers missing: fast tokenizer simply not registered.
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: modeling classes simply not registered.
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 375 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
# NOTE(review): identifiers in this class look machine-mangled — every method is
# named `SCREAMING_SNAKE_CASE__` (later defs shadow earlier ones), parameters are
# all `snake_case` (duplicate parameter names are a SyntaxError), and locals are
# `lowercase` while the bodies read the original names (`parent`, `config`,
# `result`, ...). Comments describe the intended OpenAI-GPT model-tester logic.
class A_ :
    '''Factory for tiny OpenAI-GPT configs and random inputs used by the model tests,
    plus create-and-check helpers for each head variant.'''

    def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
        # Stash all hyper-parameters for the tiny test model.
        lowercase = parent
        lowercase = batch_size
        lowercase = seq_length
        lowercase = is_training
        lowercase = use_token_type_ids
        lowercase = use_labels
        lowercase = vocab_size
        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = intermediate_size
        lowercase = hidden_act
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = max_position_embeddings
        lowercase = type_vocab_size
        lowercase = type_sequence_label_size
        lowercase = initializer_range
        lowercase = num_labels
        lowercase = num_choices
        lowercase = scope
        # Last vocab id doubles as the pad token for the tiny config.
        lowercase = self.vocab_size - 1

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Build random input ids / optional token-type ids / optional labels and
        # a tiny OpenAIGPTConfig, returning everything as a flat tuple.
        lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase = None
        if self.use_token_type_ids:
            lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase = None
        lowercase = None
        lowercase = None
        if self.use_labels:
            lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase = ids_tensor([self.batch_size] , self.num_choices )
        lowercase = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        lowercase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
        # Base model: check the last hidden state has the expected shape for
        # several argument combinations.
        lowercase = OpenAIGPTModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        lowercase = model(snake_case , token_type_ids=snake_case , head_mask=snake_case )
        lowercase = model(snake_case , token_type_ids=snake_case )
        lowercase = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
        # LM head: scalar loss and vocab-sized logits.
        lowercase = OpenAIGPTLMHeadModel(snake_case )
        model.to(snake_case )
        model.eval()
        lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
        # Double-heads model: same loss/logits shape checks.
        lowercase = OpenAIGPTDoubleHeadsModel(snake_case )
        model.to(snake_case )
        model.eval()
        lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
        # Sequence-classification head: logits shaped (batch, num_labels).
        lowercase = self.num_labels
        lowercase = OpenAIGPTForSequenceClassification(snake_case )
        model.to(snake_case )
        model.eval()
        lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Repack prepare_config_and_inputs() into the (config, inputs_dict) shape
        # the common test mixin expects.
        lowercase = self.prepare_config_and_inputs()
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) = config_and_inputs
        lowercase = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
# NOTE(review): identifiers here look machine-mangled — methods are all
# `SCREAMING_SNAKE_CASE__`, parameters are duplicate `snake_case` (SyntaxError),
# locals are `lowercase` while bodies read the original names. Comments describe
# the intended common ModelTest class for OpenAI-GPT.
@require_torch
class A_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    '''Common model/generation/pipeline tests instantiated for the OpenAI-GPT family.'''

    # All model classes exercised by the common tests (torch only).
    _UpperCamelCase : Optional[Any] = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    _UpperCamelCase : Tuple = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    # Pipeline-task -> model-class mapping for the pipeline test mixin.
    _UpperCamelCase : str = (
        {
            """feature-extraction""": OpenAIGPTModel,
            """text-classification""": OpenAIGPTForSequenceClassification,
            """text-generation""": OpenAIGPTLMHeadModel,
            """zero-shot""": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case ):
        # Skip logic for pipeline tests that are known to fail for this model.
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case=False ):
        # Extend the common input preparation with the extra label tensors the
        # double-heads model requires.
        lowercase = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                lowercase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=snake_case , )
                lowercase = inputs_dict['labels']
                lowercase = inputs_dict['labels']
                lowercase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=snake_case , )
                lowercase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=snake_case )
        return inputs_dict

    def SCREAMING_SNAKE_CASE__ ( self ):
        # setUp: model tester plus config tester for the common config checks.
        lowercase = OpenAIGPTModelTester(self )
        lowercase = ConfigTester(self , config_class=snake_case , n_embd=37 )

    def SCREAMING_SNAKE_CASE__ ( self ):
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*snake_case )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Smoke test: every published checkpoint loads.
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase = OpenAIGPTModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
self.assertIsNotNone(snake_case )
# NOTE(review): same mangling as above — locals are `lowercase`, call arguments
# are `snake_case`; the body reads `model` / `output_ids`, which no longer exist.
@require_torch
class A_ ( unittest.TestCase ):
    '''Slow integration test: greedy generation from the `openai-gpt` checkpoint
    must reproduce a fixed token sequence.'''

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        lowercase = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
        model.to(snake_case )
        lowercase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=snake_case )  # the president is
        # Expected greedy continuation token ids for the prompt above.
        lowercase = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            4_0477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i\'m sure he is, " said the
        lowercase = model.generate(snake_case , do_sample=snake_case )
        self.assertListEqual(output_ids[0].tolist() , snake_case )
| 84 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import scaffold for the ViT-MSN package: submodule contents are
# registered in `_import_structure` and only imported on first attribute access.
#
# Fixes to the mangled original: assignments were bound to `A__` (a plain list
# overwriting the dict instead of adding a keyed entry), `_LazyModule` was
# called with an undefined `_import_structure` (NameError at import time), and
# its result was stored in a throwaway variable instead of being installed
# into `sys.modules`.
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: modeling classes simply not registered.
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 183 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Second copy of the ViT-MSN lazy init; same fixes as the first: bind the dict
# to `_import_structure` (previously a throwaway name while the _LazyModule
# call read the undefined `_import_structure`), register the torch-only
# symbols under their module key, and install the lazy proxy in sys.modules.
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vit_msn'''] = [
        '''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMSNModel''',
        '''ViTMSNForImageClassification''',
        '''ViTMSNPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
# NOTE(review): this RealmRetriever test class was machine-obfuscated — every
# method shares the name `SCREAMING_SNAKE_CASE` (so only the last definition
# survives, and calls like `self.get_config()` / `self.get_tokenizer()` have
# no target), and method-local results are bound to throwaway annotated names
# (`a_ : ... =`) while later lines read the intended identifiers
# (`vocab_tokens`, `self.vocab_file`, `config`, `dataset`, `block_records`,
# `retriever`, `tokenizer`, `concat_inputs`, ...), which are undefined here.
# The executable tokens are left untouched; inline notes flag each spot.
class SCREAMING_SNAKE_CASE ( __lowerCamelCase ):
    # setUp: build a temp dir with a tiny Realm vocab and a block-records dir.
    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
        # presumably self.tmpdirname / self.num_block_records — TODO confirm
        a_ : Optional[int] = tempfile.mkdtemp()
        a_ : int = 5
        # Realm tok
        # presumably `vocab_tokens` — read by the write() call below
        a_ : Any = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        # NOTE(review): the tokenizer dir path is bound to `a_` but
        # `__SCREAMING_SNAKE_CASE` / `self.vocab_file` below are undefined.
        a_ : int = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
        a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        a_ : List[str] = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
    # get_tokenizer: load the tokenizer written by setUp.
    def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
    # tearDown: remove the temp dir.
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
        shutil.rmtree(self.tmpdirname )
    # get_config — NOTE(review): returns `config`, which is undefined (result
    # of RealmConfig(...) was bound to `a_`).
    def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
        a_ : Union[str, Any] = RealmConfig(num_block_records=self.num_block_records )
        return config
    # dummy dataset — NOTE(review): returns undefined `dataset`.
    def SCREAMING_SNAKE_CASE ( self : str ) -> str:
        a_ : int = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset
    # dummy block records — NOTE(review): returns undefined `block_records`;
    # the np.array dtype argument `__SCREAMING_SNAKE_CASE` is also undefined.
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
        a_ : Tuple = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=__SCREAMING_SNAKE_CASE , )
        return block_records
    # dummy retriever — NOTE(review): returns undefined `retriever`.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
        a_ : Any = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever
    # Retrieval test (single answer) — all intermediates read below
    # (`retriever`, `tokenizer`, `config`, `concat_inputs`) are undefined.
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
        a_ : str = self.get_config()
        a_ : Tuple = self.get_dummy_retriever()
        a_ : str = retriever.tokenizer
        a_ : Any = np.array([0, 3] , dtype='''long''' )
        a_ : Tuple = tokenizer(['''Test question'''] ).input_ids
        a_ : Dict = tokenizer(
            ['''the fourth'''] , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , ).input_ids
        a_ : Optional[Any] = config.reader_seq_len
        a_ , a_ , a_ , a_ : Tuple = retriever(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , answer_ids=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
        self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
        self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
        self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
    # Retrieval test (multiple answers) — same undefined-name pattern.
    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
        a_ : int = self.get_config()
        a_ : List[str] = self.get_dummy_retriever()
        a_ : Optional[int] = retriever.tokenizer
        a_ : List[str] = np.array([0, 3, 5] , dtype='''long''' )
        a_ : Dict = tokenizer(['''Test question'''] ).input_ids
        a_ : Union[str, Any] = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , ).input_ids
        a_ : Dict = config.reader_seq_len
        a_ , a_ , a_ , a_ : Any = retriever(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , answer_ids=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
        self.assertEqual([False, True, True] , __SCREAMING_SNAKE_CASE )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __SCREAMING_SNAKE_CASE )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __SCREAMING_SNAKE_CASE )
    # save/from_pretrained round-trip test, local path and mocked hub download.
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
        a_ : List[str] = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        # Test local path
        a_ : Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            a_ : Any = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            a_ : str = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
            self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 466 |
import math
def UpperCAmelCase_(__SCREAMING_SNAKE_CASE):
    """Return the list of primes strictly below ``__SCREAMING_SNAKE_CASE``.

    Odd-only sieve of Eratosthenes. Fixes: the obfuscated original rebound a
    throwaway local instead of writing the sieve table (so nothing was ever
    marked composite) and appended the argument instead of ``i`` to the result;
    it also crashed with IndexError for inputs below 3 (now returns []).
    """
    n = __SCREAMING_SNAKE_CASE
    if n < 3:
        # No primes strictly below 2; also avoids indexing past the table.
        return []
    is_prime = [True] * n
    is_prime[0] = False  # 0 and 1 are not prime
    is_prime[1] = False
    # Mark multiples of each odd candidate; even indices are never read below.
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
# NOTE(review): Project-Euler-style summation over numbers divisible by
# consecutive prime squares. The obfuscation broke it: every intermediate is
# rebound to `lowercase` while later lines read the intended names
# (`prime_index`, `last_prime`, `next_prime`, `lower_bound`, `upper_bound`,
# `current`, `matches_sum`), `prime_sieve` is not defined under that name in
# this file (the sieve above is also called `UpperCAmelCase_`, which this def
# shadows), and `limit` is never bound from the parameter. Executable tokens
# left untouched; confirm the intended names against the upstream script.
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = 9999_6666_3333 ):
    # presumably: upper_bound = floor(sqrt(limit)) + 100; primes = prime_sieve(upper_bound)
    lowercase = math.floor(math.sqrt(__SCREAMING_SNAKE_CASE ) ) + 100
    lowercase = prime_sieve(__SCREAMING_SNAKE_CASE )
    lowercase = 0
    lowercase = 0
    lowercase = primes[prime_index]
    while (last_prime**2) <= limit:
        lowercase = primes[prime_index + 1]
        lowercase = last_prime**2
        lowercase = next_prime**2
        # Get numbers divisible by lps(current)
        lowercase = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        lowercase = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        lowercase = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        lowercase = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
    # Fix: `solution` is not defined in this file; the entry point is the
    # (last-bound) `UpperCAmelCase_` defined immediately above.
    print(UpperCAmelCase_())
| 84 | 0 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
# NOTE(review): EulerDiscreteScheduler test suite. Machine-obfuscation broke
# it: every method shares the name `A` (later defs shadow earlier ones), and
# intermediates are bound to `UpperCAmelCase_ : T =` while later lines read
# the intended names (`config`, `scheduler_class`, `scheduler`, `model`,
# `sample`, `output`, `result_sum`, `result_mean`) and loop results are passed
# as the undefined `_A`. Executable tokens left untouched; inline notes below.
class snake_case__ ( __lowerCamelCase):
    # scheduler classes under test and number of inference steps
    a_ = (EulerDiscreteScheduler,)
    a_ = 10
    # get_scheduler_config — NOTE(review): dict bound to `UpperCAmelCase_`,
    # but `config` below is undefined.
    def A ( self : Union[str, Any] , **_A : Tuple ) -> List[Any]:
        UpperCAmelCase_ : Any = {
            '''num_train_timesteps''': 11_00,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**_A )
        return config
    # NOTE(review): `_A` below should presumably be the loop variable.
    def A ( self : List[str] ) -> Union[str, Any]:
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=_A )
    def A ( self : Any ) -> Any:
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=_A , beta_end=_A )
    def A ( self : Any ) -> Optional[int]:
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=_A )
    def A ( self : int ) -> str:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_A )
    # Full denoising loop, epsilon prediction; checks sum/mean of the result.
    def A ( self : Optional[Any] ) -> int:
        UpperCAmelCase_ : Any = self.scheduler_classes[0]
        UpperCAmelCase_ : str = self.get_scheduler_config()
        UpperCAmelCase_ : Union[str, Any] = scheduler_class(**_A )
        scheduler.set_timesteps(self.num_inference_steps )
        UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
        UpperCAmelCase_ : str = self.dummy_model()
        UpperCAmelCase_ : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
        UpperCAmelCase_ : Dict = sample.to(_A )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase_ : int = scheduler.scale_model_input(_A , _A )
            UpperCAmelCase_ : Any = model(_A , _A )
            UpperCAmelCase_ : Dict = scheduler.step(_A , _A , _A , generator=_A )
            UpperCAmelCase_ : Any = output.prev_sample
        UpperCAmelCase_ : Optional[int] = torch.sum(torch.abs(_A ) )
        UpperCAmelCase_ : Optional[Any] = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 10.0_807 ) < 1e-2
        assert abs(result_mean.item() - 0.0_131 ) < 1e-3
    # Same loop with v-prediction.
    def A ( self : List[Any] ) -> List[Any]:
        UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
        UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
        UpperCAmelCase_ : Union[str, Any] = scheduler_class(**_A )
        scheduler.set_timesteps(self.num_inference_steps )
        UpperCAmelCase_ : List[Any] = torch.manual_seed(0 )
        UpperCAmelCase_ : Tuple = self.dummy_model()
        UpperCAmelCase_ : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
        UpperCAmelCase_ : int = sample.to(_A )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase_ : Any = scheduler.scale_model_input(_A , _A )
            UpperCAmelCase_ : Union[str, Any] = model(_A , _A )
            UpperCAmelCase_ : Optional[Any] = scheduler.step(_A , _A , _A , generator=_A )
            UpperCAmelCase_ : str = output.prev_sample
        UpperCAmelCase_ : Optional[Any] = torch.sum(torch.abs(_A ) )
        UpperCAmelCase_ : Any = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 0.0_002 ) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
    # Same loop with timesteps placed on a device.
    def A ( self : Dict ) -> List[Any]:
        UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
        UpperCAmelCase_ : Optional[int] = self.get_scheduler_config()
        UpperCAmelCase_ : Optional[int] = scheduler_class(**_A )
        scheduler.set_timesteps(self.num_inference_steps , device=_A )
        UpperCAmelCase_ : Dict = torch.manual_seed(0 )
        UpperCAmelCase_ : Tuple = self.dummy_model()
        UpperCAmelCase_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        UpperCAmelCase_ : Union[str, Any] = sample.to(_A )
        for t in scheduler.timesteps:
            UpperCAmelCase_ : Union[str, Any] = scheduler.scale_model_input(_A , _A )
            UpperCAmelCase_ : str = model(_A , _A )
            UpperCAmelCase_ : List[Any] = scheduler.step(_A , _A , _A , generator=_A )
            UpperCAmelCase_ : Tuple = output.prev_sample
        UpperCAmelCase_ : Optional[Any] = torch.sum(torch.abs(_A ) )
        UpperCAmelCase_ : List[str] = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 10.0_807 ) < 1e-2
        assert abs(result_mean.item() - 0.0_131 ) < 1e-3
    # Same loop with Karras sigmas enabled.
    def A ( self : Dict ) -> List[str]:
        UpperCAmelCase_ : List[Any] = self.scheduler_classes[0]
        UpperCAmelCase_ : Optional[Any] = self.get_scheduler_config()
        UpperCAmelCase_ : str = scheduler_class(**_A , use_karras_sigmas=_A )
        scheduler.set_timesteps(self.num_inference_steps , device=_A )
        UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
        UpperCAmelCase_ : Union[str, Any] = self.dummy_model()
        UpperCAmelCase_ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        UpperCAmelCase_ : List[str] = sample.to(_A )
        for t in scheduler.timesteps:
            UpperCAmelCase_ : Optional[int] = scheduler.scale_model_input(_A , _A )
            UpperCAmelCase_ : int = model(_A , _A )
            UpperCAmelCase_ : List[str] = scheduler.step(_A , _A , _A , generator=_A )
            UpperCAmelCase_ : Dict = output.prev_sample
        UpperCAmelCase_ : List[str] = torch.sum(torch.abs(_A ) )
        UpperCAmelCase_ : Any = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2
        assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
| 541 |
import collections
import os
import re
from pathlib import Path
# Root of the transformers source tree walked by the checks below.
# Fix: the obfuscated original rebound the single name `UpperCAmelCase` for
# every constant, so the `_re_*` patterns and `PATH_TO_TRANSFORMERS` that the
# functions below reference were all undefined. The intended names (taken from
# the references in the function bodies) are restored here.
PATH_TO_TRANSFORMERS = 'src/transformers'

# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(r'^\s*try:')
# Catches a line with else:
_re_else = re.compile(r'^\s*else:')

# Preserve the obfuscated alias; its final binding was the `else:` pattern.
UpperCAmelCase = _re_else
def UpperCAmelCase_(__SCREAMING_SNAKE_CASE):
    """Return the backend(s) guarded by an ``if not is_xxx_available()`` line.

    Multiple backends are joined as a sorted ``"_and_"`` key (e.g.
    ``"tf_and_torch"``); returns ``None`` when the line is not such a guard.

    Fix: the original read module-level compiled patterns that the obfuscation
    left undefined; the patterns are applied inline here so the function is
    self-contained.
    """
    line = __SCREAMING_SNAKE_CASE
    if re.search(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)', line) is None:
        return None
    # findall yields (backend, '') tuples because of the trailing empty group.
    backends = [b[0] for b in re.findall(r'is\_([a-z_]*)_available()', line)]
    backends.sort()
    return "_and_".join(backends)
def UpperCAmelCase_(__SCREAMING_SNAKE_CASE):
    """Parse one ``__init__.py`` into ``(import_dict_objects, type_hint_objects)``.

    Each is a dict mapping a backend key (or ``'none'``) to the list of object
    names registered for it — the first taken from the ``_import_structure``
    half of the init, the second from the ``TYPE_CHECKING`` half. Returns
    ``None`` for traditional inits with no ``_import_structure``.

    Fix: the obfuscated original rebound every intermediate to ``lowercase``
    and then read the intended names (``lines``, ``line_index``, ``objects``,
    ``backend``, ...), all undefined; those names are restored with the
    control flow preserved line-for-line.
    """
    init_file = __SCREAMING_SNAKE_CASE
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('_import_structure = {'):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r'\[([^\]]+)\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(' ' * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('else')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def UpperCAmelCase_(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings.

    Fix: the obfuscated signature repeated the same parameter name twice,
    which is a SyntaxError; the body already reads ``import_dict_objects`` and
    ``type_hint_objects``, so those are the restored parameter names. The
    intermediates (``errors``, ``duplicate_imports``, ``duplicate_type_hints``,
    ``name``) were likewise rebound to a throwaway local and are restored.
    """
    def find_duplicates(seq):
        # Names registered more than once within one backend list.
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = 'base imports' if key == 'none' else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''')
    return errors
def UpperCAmelCase_():
    """Check every ``__init__.py`` under the source tree; raise on mismatches.

    Fix: restored the intermediates (``failures``, ``fname``, ``objects``,
    ``errors``) that the obfuscation collapsed into ``lowercase``, and the
    walked root, which was the undefined ``__SCREAMING_SNAKE_CASE``.
    NOTE(review): relies on the module-level ``PATH_TO_TRANSFORMERS`` and on
    the sibling parse/analyze helpers under their intended names — confirm.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file.
                    errors[0] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('\n'.join(errors))
    if len(failures) > 0:
        raise ValueError('\n\n'.join(failures))
def UpperCAmelCase_():
    """Return the dotted names of all transformers submodules on disk.

    Walks ``PATH_TO_TRANSFORMERS``, collecting package folders (skipping
    private ones and branch leftovers) plus top-level single-file modules.
    Fix: restored the intermediates (``submodules``, ``short_path``,
    ``submodule``) and the walked root/``relative_to`` target, which the
    obfuscation replaced with undefined names.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_'):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('*.py'))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '.')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('.py', '').replace(os.path.sep, '.')
            # Only top-level modules count; nested files belong to a package.
            if len(submodule.split('.')) == 1:
                submodules.append(submodule)
    return submodules
# Submodules intentionally absent from the main `_import_structure`.
# Fix: the obfuscated original bound this list to a throwaway name, leaving
# the `IGNORE_SUBMODULES` read by the submodule check undefined.
IGNORE_SUBMODULES = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
    '''models.esm.openfold_utils''',
]
def UpperCAmelCase_():
    """Verify every on-disk submodule is registered in the main transformers init.

    Fix: restored the intermediates (``transformers``, ``import_structure_keys``,
    ``init_content``, ``module_not_registered``, ``list_of_modules``) that the
    obfuscation collapsed, and the undefined path argument, which should be
    the module-level ``PATH_TO_TRANSFORMERS``.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), 'r') as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r'import_structure\[\"([^\"]*)\"\]', init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '\n'.join(F'''- {module}''' for module in module_not_registered)
        raise ValueError(
            'The following submodules are not properly registed in the main init of Transformers:\n'
            F'''{list_of_modules}\n'''
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.')
if __name__ == "__main__":
    # NOTE(review): `check_all_inits` / `check_submodules` are not defined
    # under these names in this file (the defs above were all renamed to
    # `UpperCAmelCase_` by the obfuscation), so running this module raises
    # NameError — confirm the intended entry-point names.
    check_all_inits()
    check_submodules()
| 84 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger — the config class body reads it as `logger`, but the
# obfuscated original bound it to `snake_case__` and then immediately
# overwrote that name with the archive map below.
logger = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config URL.
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/conditional-detr-resnet-50""": (
        """https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
    ),
}

# Preserve the obfuscated alias (its final binding was the archive map).
snake_case__ = CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP
class _a(PretrainedConfig):
    """Configuration for the Conditional DETR model.

    Fixes to the obfuscated original: the base class was the undefined
    ``__lowerCamelCase`` (restored to ``PretrainedConfig``, imported at the
    top of this file); the ``__init__`` signature repeated the same parameter
    name for every argument (a SyntaxError) — parameter names are restored
    from the attribute reads in the body and the defaults' order; and every
    ``self.<attr> = ...`` assignment had been collapsed into a throwaway
    local, so no attribute was ever set.
    """

    model_type = """conditional_detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.2_5,
        **kwargs,
    ):
        # A timm backbone and an explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                # Rehydrate a serialized backbone config through its registered class.
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self):
        """Alias required by the `attribute_map` above."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        """Alias required by the `attribute_map` above."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class _a(OnnxConfig):
    """ONNX export configuration for Conditional DETR.

    Fixes: the base class was the undefined ``__lowerCamelCase`` (restored to
    ``OnnxConfig``, imported at the top of this file), and the obfuscation
    collapsed the class attribute and all property names into throwaway
    identifiers — the names below follow the ``OnnxConfig`` contract
    (``inputs`` / ``atol_for_validation`` / ``default_onnx_opset``).
    NOTE(review): this class shadows the config class above, which is also
    named ``_a`` — confirm the intended distinct class names.
    """

    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self):
        # Dynamic axes for the two model inputs.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ])

    @property
    def atol_for_validation(self):
        # Absolute tolerance used when validating the exported model.
        return 1e-5

    @property
    def default_onnx_opset(self):
        return 12
| 23 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
# Generic element type for the Node/Stack classes below; they refer to it as
# `T`, so bind that name (and keep the obfuscated alias for compatibility).
T = TypeVar('T')
UpperCAmelCase = T
class A_(Generic[T]):
    """A single node of the singly linked list backing the stack below."""

    def __init__(self, snake_case):
        # Fix: the original rebound a throwaway local instead of setting these
        # attributes, so nodes carried no payload and no link.
        self.data = snake_case
        self.next = None

    def __str__(self):
        return F'''{self.data}'''
class A_(Generic[T]):
    """Linked-list LIFO stack.

    Fixes to the obfuscated original: all five operation methods shared one
    name (so only the last definition survived and the internal
    ``self.is_empty()`` calls had no target) — the distinct names implied by
    the bodies (``is_empty``/``push``/``pop``/``peek``/``clear``) are
    restored; and locals such as ``node`` and ``pop_node`` were rebound to a
    throwaway name while later lines read the intended identifiers.
    NOTE(review): the node class above is also called ``A_`` (shadowed by
    this class), so the ``Node(...)`` call below needs the node class bound
    under that name — confirm the intended class names.
    """

    def __init__(self):
        self.top = None  # head of the list; None means empty

    def __iter__(self):
        # Yield payloads from top to bottom.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self):
        # Fix: the original interpolated an undefined name instead of each item.
        return "->".join([str(item) for item in self])

    def __len__(self):
        return len(tuple(iter(self)))

    def is_empty(self):
        return self.top is None

    def push(self, snake_case):
        node = Node(snake_case)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self):
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self):
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self):
        self.top = None
if __name__ == "__main__":
    # Run any doctests defined in this module (none are present yet).
    from doctest import testmod
    testmod()
| 84 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _A(_a):
    """Build a SwinaSRConfig matching the variant encoded in the checkpoint URL.

    Fixes to the obfuscated original: every config field was collapsed into a
    single local ``A`` (so nothing was ever set on the config), the URL was
    read under the undefined name ``checkpoint_url``, and the function
    returned the undefined name ``config``. The field names below are
    reconstructed from the Swin2SR conversion-script conventions —
    TODO confirm them against the upstream script.
    """
    checkpoint_url = _a
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 4_8
        config.upsampler = """pixelshuffle_aux"""
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        # Lighter model: shallower stages, smaller embedding.
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 6_0
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = """pixelshuffledirect"""
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = """nearest+conv"""
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        # JPEG artifact-reduction variant: grayscale input, no upscaling.
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_2_6
        config.window_size = 7
        config.img_range = 2_5_5.0
        config.upsampler = """"""
    return config
def _A ( _a : Any , _a : Optional[Any] ):
    """Map one original Swin2SR state-dict key to its transformers-style name.

    NOTE(review): machine-mangled — both parameters are named ``_a``
    (duplicate argument names are a SyntaxError in Python) while the body
    reads ``name`` and ``config``, and every rename result is bound to the
    throwaway local ``A`` instead of back to ``name``. The chain of
    ``replace`` calls still documents the intended key mapping
    (original checkpoint key -> HF module path).
    """
    if "patch_embed.proj" in name and "layers" not in name:
        A = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        A = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
    if "layers" in name:
        A = name.replace("""layers""" , """encoder.stages""" )
    if "residual_group.blocks" in name:
        A = name.replace("""residual_group.blocks""" , """layers""" )
    if "attn.proj" in name:
        # must run before the generic "attn" rule below
        A = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        A = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        A = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        A = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        A = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        A = name.replace("""mlp.fc2""" , """output.dense""" )
    if "q_bias" in name:
        A = name.replace("""q_bias""" , """query.bias""" )
    if "k_bias" in name:
        A = name.replace("""k_bias""" , """key.bias""" )
    if "v_bias" in name:
        A = name.replace("""v_bias""" , """value.bias""" )
    if "cpb_mlp" in name:
        A = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
    if "patch_embed.proj" in name:
        A = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
    if name == "norm.weight":
        A = """layernorm.weight"""
    if name == "norm.bias":
        A = """layernorm.bias"""
    if "conv_first" in name:
        A = name.replace("""conv_first""" , """first_convolution""" )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            A = name.replace("""conv_last""" , """final_convolution""" )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                A = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
            if "upsample.0" in name:
                A = name.replace("""upsample.0""" , """upsample.convolution_0""" )
            if "upsample.2" in name:
                A = name.replace("""upsample.2""" , """upsample.convolution_1""" )
            A = """upsample.""" + name
        elif config.upsampler == "pixelshuffledirect":
            A = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
            A = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
        else:
            pass
    else:
        # everything outside the heads lives under the "swin2sr." backbone prefix
        A = """swin2sr.""" + name
    return name
def _A ( _a : int , _a : List[str] ):
    """Rewrite every key of an original Swin2SR state dict to HF naming,
    splitting fused ``qkv`` tensors into separate query/key/value slices.

    NOTE(review): machine-mangled — duplicate ``_a`` parameters (SyntaxError)
    while the body reads ``orig_state_dict``/``config``; the popped value and
    all slice results are bound to the throwaway local ``A`` (originally
    ``val`` and the per-projection state-dict entries), and
    ``__SCREAMING_SNAKE_CASE``/``key_split``/``dim`` are undefined as written.
    """
    for key in orig_state_dict.copy().keys():
        A = orig_state_dict.pop(__SCREAMING_SNAKE_CASE )
        if "qkv" in key:
            # fused qkv weight/bias: split into thirds along dim 0
            A = key.split(""".""" )
            A = int(key_split[1] )
            A = int(key_split[4] )
            A = config.embed_dim
            if "weight" in key:
                A = val[:dim, :]
                A = val[dim : dim * 2, :]
                A = val[-dim:, :]
            else:
                A = val[:dim]
                A = val[dim : dim * 2]
                A = val[-dim:]
            pass
        else:
            # non-qkv keys: carry the value over under the renamed key
            A = val
    return orig_state_dict
def _A ( _a : List[str] , _a : Optional[int] , _a : Union[str, Any] ):
    """Download an original Swin2SR checkpoint, convert it to a transformers
    ``SwinaSRForImageSuperResolution``, verify logits on a sample image, and
    optionally save/push the converted model.

    NOTE(review): machine-mangled — duplicate ``_a`` parameters (SyntaxError)
    while the body reads ``checkpoint_url``/``pytorch_dump_folder_path``/
    ``push_to_hub``; every intermediate (config, model, state dict, image,
    outputs, expected tensors) is bound to the throwaway local ``A``; and the
    ``__SCREAMING_SNAKE_CASE`` argument placeholders are undefined. The
    control flow still documents the intended conversion pipeline.
    """
    # 1) build config + model, load and rename the original weights
    A = get_config(__SCREAMING_SNAKE_CASE )
    A = SwinaSRForImageSuperResolution(__SCREAMING_SNAKE_CASE )
    model.eval()
    A = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )
    A = convert_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    A , A = model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
    if len(__SCREAMING_SNAKE_CASE ) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(__SCREAMING_SNAKE_CASE ) )
    for key in unexpected_keys:
        # buffers recomputed at load time are the only tolerated leftovers
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'Unexpected key {key} in state_dict' )
    # verify values
    A = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    A = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert("""RGB""" )
    A = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    A = 1_2_6 if """Jpeg""" in checkpoint_url else 2_5_6
    A = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
        ] )
    A = transforms(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
    if config.num_channels == 1:
        # grayscale variant keeps only one channel
        A = pixel_values[:, 0, :, :].unsqueeze(1 )
    A = model(__SCREAMING_SNAKE_CASE )
    # assert values: per-checkpoint expected output shape and a 3x3 logit slice
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        A = torch.Size([1, 3, 5_1_2, 5_1_2] )
        A = torch.tensor(
            [[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        A = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        A = torch.tensor(
            [[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        A = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        A = torch.tensor(
            [[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        A = torch.Size([1, 3, 5_1_2, 5_1_2] )
        A = torch.tensor(
            [[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        A = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        A = torch.tensor(
            [[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-3 )
    print("""Looks ok!""" )
    # 3) derive the hub model name from the checkpoint URL, then save/push
    A = {
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
            """swin2SR-classical-sr-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
            """swin2SR-classical-sr-x4-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
            """swin2SR-compressed-sr-x4-48"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
            """swin2SR-lightweight-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
            """swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
        ),
    }
    A = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(__SCREAMING_SNAKE_CASE )
        print(f'Saving image processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(__SCREAMING_SNAKE_CASE )
    if push_to_hub:
        model.push_to_hub(f'caidas/{model_name}' )
        processor.push_to_hub(f'caidas/{model_name}' )
if __name__ == "__main__":
    # NOTE(review): machine-mangled entry point — the parser instance is bound
    # to ``UpperCAmelCase`` yet subsequent calls use the undefined names
    # ``parser``/``args``, and ``convert_swinasr_checkpoint`` does not exist in
    # this file (the conversion function above was renamed ``_A``).
    UpperCAmelCase =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you\'d like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
    UpperCAmelCase =parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A_ :
    """Helper that builds tiny Llama configs/inputs and checks model output shapes
    (mirrors the standard transformers ``LlamaModelTester``).

    NOTE(review): machine-mangled — every ``__init__`` parameter is named
    ``snake_case`` (duplicate argument names are a SyntaxError) and every
    attribute assignment was collapsed into the throwaway local ``lowercase``,
    so no state is actually stored on ``self``. Method-level comments below
    describe the intended behavior.
    """

    def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
        # Intended: store each hyperparameter on self (batch_size, seq_length, ...).
        lowercase = parent
        lowercase = batch_size
        lowercase = seq_length
        lowercase = is_training
        lowercase = use_input_mask
        lowercase = use_token_type_ids
        lowercase = use_labels
        lowercase = vocab_size
        lowercase = hidden_size
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = intermediate_size
        lowercase = hidden_act
        lowercase = hidden_dropout_prob
        lowercase = attention_probs_dropout_prob
        lowercase = max_position_embeddings
        lowercase = type_vocab_size
        lowercase = type_sequence_label_size
        lowercase = initializer_range
        lowercase = num_labels
        lowercase = num_choices
        lowercase = scope

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: prepare_config_and_inputs — random ids/masks/labels + config.
        lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase = None
        if self.use_input_mask:
            lowercase = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase = None
        if self.use_token_type_ids:
            lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase = None
        lowercase = None
        lowercase = None
        if self.use_labels:
            lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase = ids_tensor([self.batch_size] , self.num_choices )
        lowercase = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: get_config — a small LlamaConfig from the stored sizes.
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
        # Intended name: create_and_check_model — last_hidden_state shape check.
        lowercase = LlamaModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        lowercase = model(snake_case , attention_mask=snake_case )
        lowercase = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        # Intended name: create_and_check_model_as_decoder — with cross-attention inputs.
        lowercase = True
        lowercase = LlamaModel(snake_case )
        model.to(snake_case )
        model.eval()
        lowercase = model(
            snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
        lowercase = model(
            snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , )
        lowercase = model(snake_case , attention_mask=snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        # Intended name: create_and_check_for_causal_lm — logits shape check.
        lowercase = LlamaForCausalLM(config=snake_case )
        model.to(snake_case )
        model.eval()
        lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
        # Intended name: create_and_check_decoder_model_past_large_inputs —
        # outputs with/without past_key_values must match on a random slice.
        lowercase = True
        lowercase = True
        lowercase = LlamaForCausalLM(config=snake_case )
        model.to(snake_case )
        model.eval()
        # first forward pass
        lowercase = model(
            snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , )
        lowercase = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
        lowercase = model(
            snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
        lowercase = model(
            snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
        # select random slice
        lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowercase = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-3 ) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: prepare_config_and_inputs_for_common — tuple unpack into
        # (config, input_ids, token_type_ids, input_mask, *labels); the mangled
        # unpack below binds everything to ``lowercase``.
        lowercase = self.prepare_config_and_inputs()
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) = config_and_inputs
        lowercase = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """Common/model/pipeline test-case class for Llama.

    NOTE(review): machine-mangled — the three mixin bases were renamed to the
    undefined ``__lowerCamelCase`` (originally ModelTesterMixin,
    GenerationTesterMixin, PipelineTesterMixin), all class attributes share
    the name ``_UpperCamelCase`` (later assignments overwrite earlier ones),
    the local results are bound to the throwaway ``lowercase``, and ``setUp``
    references ``LlamaModelTester`` which is not defined here (the tester
    class above was renamed ``A_``).
    """

    # Intended attrs: all_model_classes, all_generative_model_classes,
    # pipeline_model_mapping, test_headmasking, test_pruning.
    _UpperCamelCase : Tuple = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    _UpperCamelCase : List[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
    _UpperCamelCase : int = (
        {
            """feature-extraction""": LlamaModel,
            """text-classification""": LlamaForSequenceClassification,
            """text-generation""": LlamaForCausalLM,
            """zero-shot""": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCamelCase : int = False
    _UpperCamelCase : int = False

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: setUp — build the model tester and config tester.
        lowercase = LlamaModelTester(self )
        lowercase = ConfigTester(self , config_class=snake_case , hidden_size=37 )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: test_config
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: test_model
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: test_various_embeddings — rerun per position-embedding type.
        lowercase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowercase = type
            self.model_tester.create_and_check_model(*snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: test_llama_sequence_classification_model (regression labels)
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase = 3
        lowercase = input_dict['input_ids']
        lowercase = input_ids.ne(1 ).to(snake_case )
        lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowercase = LlamaForSequenceClassification(snake_case )
        model.to(snake_case )
        model.eval()
        lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended: same check with problem_type = single_label_classification.
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase = 3
        lowercase = 'single_label_classification'
        lowercase = input_dict['input_ids']
        lowercase = input_ids.ne(1 ).to(snake_case )
        lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowercase = LlamaForSequenceClassification(snake_case )
        model.to(snake_case )
        model.eval()
        lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended: same check with problem_type = multi_label_classification.
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase = 3
        lowercase = 'multi_label_classification'
        lowercase = input_dict['input_ids']
        lowercase = input_ids.ne(1 ).to(snake_case )
        lowercase = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        lowercase = LlamaForSequenceClassification(snake_case )
        model.to(snake_case )
        model.eval()
        lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        pass

    @parameterized.expand([('linear',), ('dynamic',)] )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        # Intended name: test_model_rope_scaling — compare scaled vs original RoPE
        # outputs for short and long inputs.
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase = ids_tensor([1, 10] , config.vocab_size )
        lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowercase = LlamaModel(snake_case )
        original_model.to(snake_case )
        original_model.eval()
        lowercase = original_model(snake_case ).last_hidden_state
        lowercase = original_model(snake_case ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowercase = {'type': scaling_type, 'factor': 10.0}
        lowercase = LlamaModel(snake_case )
        scaled_model.to(snake_case )
        scaled_model.eval()
        lowercase = scaled_model(snake_case ).last_hidden_state
        lowercase = scaled_model(snake_case ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration tests pinning Llama-2 logits and greedy generation.

    NOTE(review): machine-mangled — locals are bound to the throwaway
    ``lowercase``, so later lines read undefined names (``model``, ``out``,
    ``input_ids``, ``generated_ids``...). All tests are skipped anyway; the
    tensors below are the recorded expected values per checkpoint size.
    """

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Llama-2-7b: mean-per-position logits and a 30-element slice.
        lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
        lowercase = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        lowercase = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
        torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        lowercase = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Llama-2-13b variant of the same check.
        lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
        lowercase = model(torch.tensor(snake_case ) )
        # Expected mean on dim = -1
        lowercase = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
        torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        lowercase = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Llama-2-13b-chat variant.
        lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
        lowercase = model(torch.tensor(snake_case ) )
        # Expected mean on dim = -1
        lowercase = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
        torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        lowercase = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
        # fmt: on
        torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )

    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Llama-2-70b variant.
        lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
        lowercase = model(torch.tensor(snake_case ) )
        lowercase = torch.tensor(
            [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
        # fmt: off
        lowercase = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )

    @unittest.skip('Model is curently gated' )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Greedy-generation round trip on 13b-chat: prompt must expand to EXPECTED_TEXT_COMPLETION.
        lowercase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        lowercase = 'Simply put, the theory of relativity states that '
        lowercase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        lowercase = tokenizer.encode(snake_case , return_tensors='pt' )
        lowercase = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=snake_case )
        # greedy generation outputs
        lowercase = model.generate(snake_case , max_new_tokens=64 , top_p=snake_case , temperature=1 , do_sample=snake_case )
        lowercase = tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case )
        self.assertEqual(snake_case , snake_case )
| 84 | 0 |
'''simple docstring'''
# Lookup table mapping the integers 0-15 to their lowercase hexadecimal digit
# characters (mangled name: originally ``values``; the converter below reads it
# under that original name).
__A = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}
def _A ( lowercase__ ):
    """Convert an integer (or integral float) to a ``"0x"``-prefixed lowercase
    hexadecimal string; negative inputs get a leading ``"-"``.

    Raises AssertionError when the input is not an int/float or has a
    fractional part.

    NOTE(review): the original body was machine-mangled — every local had been
    collapsed into ``lowercase__`` and it read undefined names (``decimal``,
    ``remainder``, ``hexadecimal``, ``negative``, ``values``), so it raised
    NameError. Reconstructed with a local digit table so the function is
    self-contained, and zero now renders as ``"0x0"`` (matching ``hex()``)
    instead of a bare ``"0x"``.
    """
    assert type(lowercase__ ) in (int, float) and lowercase__ == int(lowercase__ )
    decimal = int(lowercase__ )
    digits = "0123456789abcdef"
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        # Peel off the least-significant hex digit each iteration.
        decimal, remainder = divmod(decimal , 16 )
        hexadecimal = digits[remainder] + hexadecimal
    if not hexadecimal:
        # Match builtin hex(): zero is "0x0", not an empty "0x" prefix.
        hexadecimal = "0"
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
    # Execute the module's doctests when run as a script.
    from doctest import testmod

    testmod()
| 325 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class A_ :
    """Mock download manager that serves pre-packaged "dummy data" zip files in
    place of real dataset downloads (mirrors datasets' ``MockDownloadManager``).

    NOTE(review): machine-mangled — the ``__init__`` parameters are all named
    ``snake_case`` (duplicate argument names are a SyntaxError), its attribute
    assignments were collapsed into the throwaway local ``lowercase`` (so the
    ``self.*`` reads in later methods are never populated), the three class
    attributes share the name ``_UpperCamelCase`` (originally
    ``dummy_data_folder_name`` / ``datasets_scripts_dir-like`` constants), and
    the public methods all share the name ``SCREAMING_SNAKE_CASE__``. Intended
    behavior is documented per method.
    """

    _UpperCamelCase : Dict = """dummy_data"""
    _UpperCamelCase : Optional[int] = """datasets"""
    _UpperCamelCase : Tuple = False

    def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = False , snake_case = True , snake_case = None , ):
        # Intended: store dataset_name/config/cache_dir/flags on self.
        lowercase = 0
        lowercase = dataset_name
        lowercase = cache_dir
        lowercase = use_local_dummy_data
        lowercase = config
        # download_callbacks take a single url as input
        lowercase = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        lowercase = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        lowercase = str(snake_case )
        # to be downloaded
        lowercase = None
        lowercase = None

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: dummy_file — lazily download/cache the dummy-data zip.
        if self._dummy_file is None:
            lowercase = self.download_dummy_data()
        return self._dummy_file

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: dummy_data_folder — path inside the repo per config/version.
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('dummy' , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join('dummy' , self.version_name )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: dummy_zip_file
        return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: download_dummy_data — fetch (locally or from GitHub),
        # extract, and return the path to the dummy data.
        lowercase = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        lowercase = cached_path(
            snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
        return os.path.join(snake_case , self.dummy_file_name )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: local_path_to_dummy_data
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: github_path_to_dummy_data — lazily built GitHub raw URL.
        if self._bucket_url is None:
            lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
        return self._bucket_url

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: manual_dir (or similar) — parent dir of the dummy file.
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
        # Intended name: download_and_extract — dispatch on the url structure
        # (dict / list / single url) and map to local dummy paths.
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            lowercase = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            lowercase = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(snake_case , snake_case ):
            return self.create_dummy_data_dict(snake_case , snake_case )
        elif isinstance(snake_case , (list, tuple) ):
            return self.create_dummy_data_list(snake_case , snake_case )
        else:
            return self.create_dummy_data_single(snake_case , snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
        # Intended name: download — alias for download_and_extract.
        return self.download_and_extract(snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # Intended name: download_custom — alias for download_and_extract.
        return self.download_and_extract(snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case , **snake_case ):
        # Intended name: extract — dummy data is pre-extracted, so identity.
        return path

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: get_recorded_sizes_checksums — nothing recorded for dummies.
        return {}

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # Intended name: create_dummy_data_dict — map each url (or url list) to
        # the matching file inside the dummy-data folder.
        lowercase = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(snake_case , snake_case ):
                    for single_url in single_urls:
                        download_callback(snake_case )
                else:
                    lowercase = single_urls
                    download_callback(snake_case )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(snake_case , snake_case ):
                lowercase = [os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) ) for x in single_urls]
            else:
                lowercase = single_urls
                lowercase = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
            lowercase = value
        # make sure that values are unique
        if all(isinstance(snake_case , snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            lowercase = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # Intended name: create_dummy_data_list — same mapping for url lists,
        # with special-casing for sharded tfrecord/pubmed URL patterns.
        lowercase = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , snake_case ) ) for url in data_url )
        lowercase = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            lowercase = [data_url[0]] * len(snake_case )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(snake_case )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            lowercase = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
            dummy_data_list.append(snake_case )
        return dummy_data_list

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # Intended name: create_dummy_data_single — one url -> one local file.
        for download_callback in self.download_callbacks:
            download_callback(snake_case )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        lowercase = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
        if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: delete_extracted_files — no-op for dummy data.
        pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intended name: manage_extracted_files — no-op for dummy data.
        pass

    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        # Intended name: iter_archive — yield (relative posix path, file handle)
        # pairs, preserving ZIP member order when using local dummy data.
        def _iter_archive_members(snake_case ):
            # this preserves the order of the members inside the ZIP archive
            lowercase = Path(self.dummy_file ).parent
            lowercase = path.relative_to(snake_case )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                lowercase = zip_file.namelist()
                for member in members:
                    if member.startswith(relative_path.as_posix() ):
                        yield dummy_parent_path.joinpath(snake_case )
        lowercase = Path(snake_case )
        lowercase = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob('*' )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
                yield file_path.relative_to(snake_case ).as_posix(), file_path.open('rb' )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        # Intended name: iter_files — walk files under each path, skipping
        # hidden/dunder entries, in sorted order.
        if not isinstance(snake_case , snake_case ):
            lowercase = [paths]
        for path in paths:
            if os.path.isfile(snake_case ):
                if os.path.basename(snake_case ).startswith(('.', '__') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(snake_case ):
                    if os.path.basename(snake_case ).startswith(('.', '__') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(snake_case ):
                        if filename.startswith(('.', '__') ):
                            continue
                        yield os.path.join(snake_case , snake_case )
| 84 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _lowerCAmelCase( __lowerCamelCase ):
    """Value-guided diffusion planner: denoises state-action trajectories with a
    UNet diffusion model while nudging each step toward trajectories the learned
    value function scores highly, then returns the best first action.

    NOTE(review): this block is machine-mangled — every assignment binds a
    throwaway local named ``UpperCamelCase_`` while later code reads real names
    (``self.value_function``, ``self.unet``, ``x``, ``obs`` …), the ``__init__``
    parameter list repeats ``_lowerCamelCase``, and the base class name
    ``__lowerCamelCase`` is unresolved in this view. Comments describe apparent
    intent only; confirm against the upstream value-guided sampling pipeline.
    """

    def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
        super().__init__()
        # intended: store value_function / unet / scheduler / env on self
        UpperCamelCase_: Optional[int] = value_function
        UpperCamelCase_: Union[str, Any] = unet
        UpperCamelCase_: str = scheduler
        UpperCamelCase_: Optional[Any] = env
        UpperCamelCase_: int = env.get_dataset()
        # per-key dataset means/stds used by the (de)normalization helpers below;
        # the bare excepts skip non-numeric dataset entries best-effort
        UpperCamelCase_: int = {}
        for key in self.data.keys():
            try:
                UpperCamelCase_: List[Any] = self.data[key].mean()
            except: # noqa: E722
                pass
        UpperCamelCase_: str = {}
        for key in self.data.keys():
            try:
                UpperCamelCase_: int = self.data[key].std()
            except: # noqa: E722
                pass
        UpperCamelCase_: str = env.observation_space.shape[0]
        UpperCamelCase_: str = env.action_space.shape[0]

    def _a ( self , _lowerCamelCase , _lowerCamelCase ):
        # normalize a value by the dataset statistics stored for `key`
        return (x_in - self.means[key]) / self.stds[key]

    def _a ( self , _lowerCamelCase , _lowerCamelCase ):
        # inverse of the normalization above
        return x_in * self.stds[key] + self.means[key]

    def _a ( self , _lowerCamelCase ):
        # recursively convert dicts / arrays to torch tensors on the UNet's device
        if type(_lowerCamelCase ) is dict:
            return {k: self.to_torch(_lowerCamelCase ) for k, v in x_in.items()}
        elif torch.is_tensor(_lowerCamelCase ):
            return x_in.to(self.unet.device )
        return torch.tensor(_lowerCamelCase , device=self.unet.device )

    def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
        # pin the conditioned timesteps of the trajectory to fixed values
        for key, val in cond.items():
            UpperCamelCase_: List[str] = val.clone()
        return x_in

    def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
        """Reverse-diffusion loop with value-function gradient guidance."""
        UpperCamelCase_: Optional[int] = x.shape[0]
        UpperCamelCase_: Optional[int] = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            UpperCamelCase_: List[str] = torch.full((batch_size,) , _lowerCamelCase , device=self.unet.device , dtype=torch.long )
            for _ in range(_lowerCamelCase ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    UpperCamelCase_: List[Any] = self.value_function(x.permute(0 , 2 , 1 ) , _lowerCamelCase ).sample
                    UpperCamelCase_: Union[str, Any] = torch.autograd.grad([y.sum()] , [x] )[0]
                    # scale the value gradient by the scheduler's posterior std-dev
                    UpperCamelCase_: Tuple = self.scheduler._get_variance(_lowerCamelCase )
                    UpperCamelCase_: str = torch.exp(0.5 * posterior_variance )
                    UpperCamelCase_: Optional[int] = model_std * grad
                UpperCamelCase_: Union[str, Any] = 0
                UpperCamelCase_: Optional[Any] = x.detach()
                # ascend the value gradient, then re-pin the conditioned entries
                UpperCamelCase_: Optional[int] = x + scale * grad
                UpperCamelCase_: Tuple = self.reset_xa(_lowerCamelCase , _lowerCamelCase , self.action_dim )
            UpperCamelCase_: Optional[Any] = self.unet(x.permute(0 , 2 , 1 ) , _lowerCamelCase ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            UpperCamelCase_: Dict = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , predict_epsilon=_lowerCamelCase )['prev_sample']
            # apply conditions to the trajectory (set the initial state)
            UpperCamelCase_: Dict = self.reset_xa(_lowerCamelCase , _lowerCamelCase , self.action_dim )
            UpperCamelCase_: str = self.to_torch(_lowerCamelCase )
        return x, y

    def __call__( self , _lowerCamelCase , _lowerCamelCase=6_4 , _lowerCamelCase=3_2 , _lowerCamelCase=2 , _lowerCamelCase=0.1 ):
        """Plan from one observation: sample guided trajectories and return the
        de-normalized action sequence of the highest-value one (first action at
        index ``[selected_index, 0]``)."""
        # normalize the observations and create batch dimension
        UpperCamelCase_: str = self.normalize(_lowerCamelCase , 'observations' )
        UpperCamelCase_: Any = obs[None].repeat(_lowerCamelCase , axis=0 )
        UpperCamelCase_: str = {0: self.to_torch(_lowerCamelCase )}
        UpperCamelCase_: int = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        UpperCamelCase_: Any = randn_tensor(_lowerCamelCase , device=self.unet.device )
        UpperCamelCase_: str = self.reset_xa(_lowerCamelCase , _lowerCamelCase , self.action_dim )
        UpperCamelCase_: Union[str, Any] = self.to_torch(_lowerCamelCase )
        # run the diffusion process
        UpperCamelCase_ ,UpperCamelCase_: List[str] = self.run_diffusion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
        # sort output trajectories by value
        UpperCamelCase_: Union[str, Any] = y.argsort(0 , descending=_lowerCamelCase ).squeeze()
        UpperCamelCase_: str = x[sorted_idx]
        UpperCamelCase_: Any = sorted_values[:, :, : self.action_dim]
        UpperCamelCase_: Any = actions.detach().cpu().numpy()
        UpperCamelCase_: Dict = self.de_normalize(_lowerCamelCase , key='actions' )
        # select the action with the highest value
        if y is not None:
            UpperCamelCase_: Dict = 0
        else:
            # if we didn't run value guiding, select a random action
            UpperCamelCase_: int = np.random.randint(0 , _lowerCamelCase )
        UpperCamelCase_: Optional[Any] = denorm_actions[selected_index, 0]
        return denorm_actions
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __lowerCamelCase , unittest.TestCase ):
    """Tokenization tests for OpenAI GPT (slow and fast tokenizers).

    NOTE(review): the block is machine-mangled — the four ``_UpperCamelCase``
    class attributes all rebind the same name (only the last survives), every
    method is named ``SCREAMING_SNAKE_CASE__`` (later defs shadow earlier ones),
    the ``lowercase`` assignments drop their targets, and names such as
    ``snake_case`` / ``tokenizer_r`` / ``tokens`` are read unbound. Comments
    describe apparent intent only; verify against the upstream test file.
    """
    # intended upstream: tokenizer_class / rust_tokenizer_class /
    # test_rust_tokenizer / test_seq2seq — here all bind one mangled name
    _UpperCamelCase : Tuple = OpenAIGPTTokenizer
    _UpperCamelCase : List[Any] = OpenAIGPTTokenizerFast
    _UpperCamelCase : int = True
    _UpperCamelCase : List[Any] = False

    def SCREAMING_SNAKE_CASE__ ( self ):
        # setUp: write a toy BPE vocab + merges file into the test tmpdir
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowercase = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        lowercase = dict(zip(snake_case , range(len(snake_case ) ) ) )
        lowercase = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(snake_case ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(snake_case ) )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        # input/output pair used by the common tokenizer tests
        return "lower newer", "lower newer"

    def SCREAMING_SNAKE_CASE__ ( self ):
        # full-tokenizer smoke test: BPE split of 'lower' and id conversion
        lowercase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        lowercase = 'lower'
        lowercase = ['low', 'er</w>']
        lowercase = tokenizer.tokenize(snake_case )
        self.assertListEqual(snake_case , snake_case )
        lowercase = tokens + ['<unk>']
        lowercase = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case=15 ):
        # padding with 'max_length' must raise for tokenizers without a pad token
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowercase = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
                # Simple input
                lowercase = 'This is a simple input'
                lowercase = ['This is a simple input 1', 'This is a simple input 2']
                lowercase = ('This is a simple input', 'This is a pair')
                lowercase = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
                # Simple input
                self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
                # Simple input
                self.assertRaises(
                    snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
                # Pair input
                self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
                # Pair input
                self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
                # Pair input
                self.assertRaises(
                    snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # intentionally a no-op: overrides a common test that does not apply here
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class A_ ( __lowerCamelCase ):
    """Placeholder subclass gated on ftfy/spacy being installed.

    NOTE(review): the base class name ``__lowerCamelCase`` is unresolved in this
    view — presumably it re-runs the tokenizer suite above with ftfy and spacy
    available; confirm upstream.
    """
    pass
| 84 | 0 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCAmelCase__ = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = 'cpu'
lowerCAmelCase__ = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
lowerCAmelCase__ = 'path-to-your-trained-model'
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCAmelCase__ = pipe.to(device)
# to channels last
lowerCAmelCase__ = pipe.unet.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.vae.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCAmelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCAmelCase__ = torch.randn(2, 4, 64, 64)
lowerCAmelCase__ = torch.rand(1) * 999
lowerCAmelCase__ = torch.randn(2, 77, 768)
lowerCAmelCase__ = (sample, timestep, encoder_hidden_status)
try:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowerCAmelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCAmelCase__ = 666
lowerCAmelCase__ = torch.Generator(device).manual_seed(seed)
lowerCAmelCase__ = {'generator': generator}
if args.steps is not None:
lowerCAmelCase__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowerCAmelCase__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png') | 626 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Fix: the path was assigned to a throwaway name, leaving `git_repo_path` undefined.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """``pytest_configure`` hook body: register the custom markers the suite uses.

    Args:
        __SCREAMING_SNAKE_CASE: the pytest ``config`` object.

    Fix: the body previously referenced an undefined name ``config``.
    """
    config = __SCREAMING_SNAKE_CASE
    config.addinivalue_line(
        'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
    config.addinivalue_line(
        'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
    config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
    config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
    config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
    config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """Forward pytest's option parser to the shared transformers helper.

    The import is deferred so merely collecting this conftest does not require
    transformers to be importable.
    """
    from transformers.testing_utils import pytest_addoption_shared

    parser = __SCREAMING_SNAKE_CASE
    pytest_addoption_shared(parser)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """``pytest_terminal_summary`` hook body: emit test reports when requested.

    Args:
        __SCREAMING_SNAKE_CASE: the pytest terminal reporter.

    Fix: the body previously read undefined names ``terminalreporter`` and
    ``make_reports`` (the getoption result was assigned to a throwaway local).
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    terminalreporter = __SCREAMING_SNAKE_CASE
    # --make-reports carries the report id; falsy means the feature is off
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def UpperCAmelCase_ ( session , exitstatus ):
    """``pytest_sessionfinish`` hook body.

    Fixes: the original signature repeated the same parameter name twice (a
    SyntaxError) and the result of the exit-code rewrite was assigned to a
    throwaway local instead of ``session.exitstatus``.
    """
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
UpperCAmelCase = doctest.register_optionflag('''IGNORE_RESULT''')
# NOTE(review): this rebinds the same name, discarding the flag registered on the
# previous line — presumably these were meant to be two distinct names
# (IGNORE_RESULT and OutputChecker, which the checker class below reads). Confirm
# against the upstream conftest.
UpperCAmelCase = doctest.OutputChecker
class A_ ( __lowerCamelCase ):
    """Doctest output checker honoring the custom IGNORE_RESULT option flag.

    NOTE(review): machine-mangled — the base ``__lowerCamelCase`` and the names
    ``IGNORE_RESULT`` / ``OutputChecker`` are unresolved here (see the shadowed
    assignments above), the three parameters share one name (a SyntaxError), and
    ``optionflags`` is read unbound. Verify against the upstream conftest.
    """
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
        # when the IGNORE_RESULT flag is set, accept any output unconditionally
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , snake_case , snake_case , snake_case )
# NOTE(review): these three statements all rebind the same module-level name, so
# only the last assignment survives, and ``CustomOutputChecker`` is undefined in
# this file (the checker class above is named ``A_``). Presumably the intent was
# to monkey-patch doctest's OutputChecker, _pytest's DoctestModule, and doctest's
# DocTestParser with the custom/Hf variants — confirm against the upstream conftest.
UpperCAmelCase = CustomOutputChecker
UpperCAmelCase = HfDoctestModule
UpperCAmelCase = HfDocTestParser
| 84 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCAmelCase__ = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def a__ ( shape , vocab_size , rng=None ):
    """Create a random integer id array.

    Args:
        shape: tuple of dimensions for the output array.
        vocab_size: ids are drawn uniformly from ``[0, vocab_size)``.
        rng: optional ``random.Random`` instance for reproducibility.

    Returns:
        ``np.ndarray`` of dtype int32 with the requested shape.

    Fixes: the original signature repeated one parameter name three times (a
    SyntaxError), never rebound ``rng``/``total_dims``/``values`` (all assigned
    to throwaway locals), and referenced the nonexistent dtype ``jnp.intaa``.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = [rng.randint(0 , vocab_size - 1 ) for _ in range(total_dims )]

    return np.array(values , dtype=jnp.int32 ).reshape(shape )
def a__ ( shape , rng=None ):
    """Return a random 0/1 attention mask of the given shape; the last position
    of every row is forced to 1 so each batch attends to at least one token.

    Fixes: the original signature repeated one parameter name (a SyntaxError)
    and assigned both the mask and the forced value to throwaway locals, so
    ``attn_mask`` was returned unbound and the forcing never happened.

    NOTE(review): ``ids_tensor`` is not defined under that name in this file
    (the helper above was mangled to ``a__``) — confirm the intended target.
    """
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class SCREAMING_SNAKE_CASE__ :
    """Mixin of generation tests for Flax causal-LM models: each test builds a
    trimmed config/input pair, runs ``model.generate`` under one sampling
    configuration, and (in most tests) asserts the jit-compiled generate
    produces identical sequences.

    NOTE(review): machine-mangled — both class attributes rebind the name ``a``
    (upstream: ``model_tester`` and ``all_generative_model_classes``), every
    method is named ``lowercase__`` (later defs shadow earlier ones), the
    ``lowerCAmelCase`` assignments drop their targets, and names such as
    ``inputs`` / ``config`` / ``model`` / ``snake_case__`` are read unbound.
    Comments describe apparent intent only; verify against the upstream
    FlaxGenerationTesterMixin.
    """
    a : int =None
    a : Any =()

    def lowercase__ ( self ):
        """Fetch a config plus trimmed inputs; returns (config, input_ids, attention_mask, max_length)."""
        lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        lowerCAmelCase : Union[str, Any] = 2
        lowerCAmelCase : Any = inputs["input_ids"].shape[-1] // 2
        lowerCAmelCase : str = inputs["input_ids"][:max_batch_size, :sequence_length]
        lowerCAmelCase : Any = jnp.ones_like(snake_case__ )
        lowerCAmelCase : Optional[int] = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        lowerCAmelCase : Optional[Any] = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            lowerCAmelCase : Any = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def lowercase__ ( self ):
        """Flax generate must match PyTorch generate after transferring weights."""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = self._get_input_ids_and_config()
        lowerCAmelCase : str = False
        lowerCAmelCase : Optional[int] = max_length
        lowerCAmelCase : int = 0
        for model_class in self.all_generative_model_classes:
            lowerCAmelCase : List[str] = model_class(snake_case__ )
            lowerCAmelCase : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
            lowerCAmelCase : List[str] = getattr(snake_case__ , snake_case__ )
            lowerCAmelCase : List[str] = pt_model_class(snake_case__ ).eval()
            lowerCAmelCase : Any = load_flax_weights_in_pytorch_model(snake_case__ , flax_model.params )
            lowerCAmelCase : Union[str, Any] = flax_model.generate(snake_case__ ).sequences
            lowerCAmelCase : Tuple = pt_model.generate(torch.tensor(snake_case__ , dtype=torch.long ) )
            # trim the flax output to the PT length before comparing
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                lowerCAmelCase : str = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )

    def lowercase__ ( self ):
        """Greedy generate must match its jit-compiled version."""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
        lowerCAmelCase : Union[str, Any] = False
        lowerCAmelCase : Tuple = max_length
        for model_class in self.all_generative_model_classes:
            lowerCAmelCase : List[Any] = model_class(snake_case__ )
            lowerCAmelCase : Union[str, Any] = model.generate(snake_case__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
            lowerCAmelCase : Optional[Any] = jit(model.generate )
            lowerCAmelCase : Dict = jit_generate(snake_case__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def lowercase__ ( self ):
        """Sampled generate must match its jit-compiled version."""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = self._get_input_ids_and_config()
        lowerCAmelCase : Optional[Any] = True
        lowerCAmelCase : Optional[Any] = max_length
        for model_class in self.all_generative_model_classes:
            lowerCAmelCase : Dict = model_class(snake_case__ )
            lowerCAmelCase : List[Any] = model.generate(snake_case__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
            lowerCAmelCase : Tuple = jit(model.generate )
            lowerCAmelCase : Dict = jit_generate(snake_case__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def lowercase__ ( self ):
        """Beam-search generate must match its jit-compiled version."""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = self._get_input_ids_and_config()
        lowerCAmelCase : Union[str, Any] = False
        lowerCAmelCase : List[Any] = max_length
        lowerCAmelCase : Optional[Any] = 2
        for model_class in self.all_generative_model_classes:
            lowerCAmelCase : List[Any] = model_class(snake_case__ )
            lowerCAmelCase : Dict = model.generate(snake_case__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
            lowerCAmelCase : Optional[int] = jit(model.generate )
            lowerCAmelCase : Any = jit_generate(snake_case__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def lowercase__ ( self ):
        """Beam search with multiple return sequences must expand the batch dimension."""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = self._get_input_ids_and_config()
        lowerCAmelCase : Dict = False
        lowerCAmelCase : List[str] = max_length
        lowerCAmelCase : Optional[int] = 2
        lowerCAmelCase : int = 2
        for model_class in self.all_generative_model_classes:
            lowerCAmelCase : Optional[Any] = model_class(snake_case__ )
            lowerCAmelCase : Optional[int] = model.generate(snake_case__ ).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )

    def lowercase__ ( self ):
        """Sampling with temperature/top-k/top-p and forced tokens must match the jitted version."""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = self._get_input_ids_and_config()
        lowerCAmelCase : int = True
        lowerCAmelCase : str = max_length
        lowerCAmelCase : Tuple = 0.8
        lowerCAmelCase : List[Any] = 10
        lowerCAmelCase : List[str] = 0.3
        lowerCAmelCase : List[Any] = 1
        lowerCAmelCase : Dict = 8
        lowerCAmelCase : List[Any] = 9
        for model_class in self.all_generative_model_classes:
            lowerCAmelCase : Optional[int] = model_class(snake_case__ )
            lowerCAmelCase : Optional[int] = model.generate(snake_case__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
            lowerCAmelCase : Optional[int] = jit(model.generate )
            lowerCAmelCase : Tuple = jit_generate(snake_case__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def lowercase__ ( self ):
        """Greedy generate with forced bos/eos tokens must match the jitted version."""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
        lowerCAmelCase : str = max_length
        lowerCAmelCase : Optional[Any] = 1
        lowerCAmelCase : List[str] = 8
        lowerCAmelCase : Optional[Any] = 9
        for model_class in self.all_generative_model_classes:
            lowerCAmelCase : Optional[Any] = model_class(snake_case__ )
            lowerCAmelCase : int = model.generate(snake_case__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
            lowerCAmelCase : Dict = jit(model.generate )
            lowerCAmelCase : List[Any] = jit_generate(snake_case__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def lowercase__ ( self ):
        """Beam search with forced bos/eos tokens must match the jitted version."""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : str = self._get_input_ids_and_config()
        lowerCAmelCase : str = max_length
        lowerCAmelCase : Union[str, Any] = 2
        lowerCAmelCase : int = 1
        lowerCAmelCase : Optional[Any] = 8
        lowerCAmelCase : str = 9
        for model_class in self.all_generative_model_classes:
            lowerCAmelCase : Dict = model_class(snake_case__ )
            lowerCAmelCase : Union[str, Any] = model.generate(snake_case__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
            lowerCAmelCase : Union[str, Any] = jit(model.generate )
            lowerCAmelCase : str = jit_generate(snake_case__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def lowercase__ ( self ):
        """Greedy generate with a left-padded attention mask must match the jitted version."""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowerCAmelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
        lowerCAmelCase : Tuple = False
        lowerCAmelCase : Optional[Any] = max_length
        for model_class in self.all_generative_model_classes:
            lowerCAmelCase : int = model_class(snake_case__ )
            lowerCAmelCase : str = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
            lowerCAmelCase : Dict = jit(model.generate )
            lowerCAmelCase : Optional[Any] = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def lowercase__ ( self ):
        """Sampled generate with a left-padded attention mask must match the jitted version."""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowerCAmelCase : Any = attention_mask.at[(0, 0)].set(0 )
        lowerCAmelCase : Union[str, Any] = True
        lowerCAmelCase : Dict = max_length
        for model_class in self.all_generative_model_classes:
            lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
            lowerCAmelCase : Optional[int] = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
            lowerCAmelCase : Optional[int] = jit(model.generate )
            lowerCAmelCase : Any = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def lowercase__ ( self ):
        """Beam-search generate with a left-padded attention mask must match the jitted version."""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = self._get_input_ids_and_config()
        # pad attention mask on the left
        lowerCAmelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
        lowerCAmelCase : str = 2
        lowerCAmelCase : Tuple = max_length
        for model_class in self.all_generative_model_classes:
            lowerCAmelCase : Tuple = model_class(snake_case__ )
            lowerCAmelCase : int = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
            self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
            lowerCAmelCase : Optional[int] = jit(model.generate )
            lowerCAmelCase : int = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Integration checks for argument validation in Flax ``generate``.

    NOTE(review): machine-mangled — the ``lowerCAmelCase`` assignments discard
    their targets, so ``tokenizer`` / ``model`` / ``snake_case__`` are read
    unbound here; verify against the upstream test.
    """
    def lowercase__ ( self ):
        """Unknown kwargs passed to ``generate()`` must raise with a helpful message."""
        lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
        lowerCAmelCase : Optional[Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        lowerCAmelCase : Union[str, Any] = "Hello world"
        lowerCAmelCase : List[Any] = tokenizer(snake_case__ , return_tensors="np" ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(snake_case__ , "do_samples" ):
            model.generate(snake_case__ , do_samples=snake_case__ )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(snake_case__ , "foo" ):
            lowerCAmelCase : Dict = {"foo": "bar"}
            model.generate(snake_case__ , **snake_case__ )
| 645 |
import torch
from torch import nn
class A_ ( nn.Module ):
    """Projected adaptive log-softmax (Transformer-XL style): the vocabulary is
    partitioned into a frequent-word head and rarer-word tail clusters; tail
    clusters use smaller embeddings (divided by ``div_val**i``) projected back to
    ``d_proj``, and a token's log-probability factors as
    ``log p(cluster) + log p(token | cluster)``.

    NOTE(review): this block is machine-mangled — every assignment in
    ``__init__`` (and most locals below) binds a throwaway name ``lowercase``
    while later code reads real names (``self.cutoffs``, ``head_logprob``,
    ``weights`` …). Comments describe apparent intent only; verify against the
    upstream ProjectedAdaptiveLogSoftmax.
    """
    def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case=1 , snake_case=False ):
        super().__init__()
        # intended: n_token / d_embed / d_proj / cutoffs (+[n_token]) /
        # cutoff_ends / div_val / shortlist_size / n_clusters / head_size
        lowercase = n_token
        lowercase = d_embed
        lowercase = d_proj
        lowercase = cutoffs + [n_token]
        lowercase = [0] + self.cutoffs
        lowercase = div_val
        lowercase = self.cutoffs[0]
        lowercase = len(self.cutoffs ) - 1
        lowercase = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            # learned cluster embeddings/biases appended to the head softmax
            lowercase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            lowercase = nn.Parameter(torch.zeros(self.n_clusters ) )
        lowercase = nn.ModuleList()
        lowercase = nn.ParameterList()
        if div_val == 1:
            # one shared embedding size; projections only needed if d_proj != d_embed
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
                else:
                    self.out_projs.append(snake_case )
                self.out_layers.append(nn.Linear(snake_case , snake_case ) )
        else:
            # shrink the embedding size for each successive (rarer) cluster
            for i in range(len(self.cutoffs ) ):
                lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                lowercase = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
                self.out_layers.append(nn.Linear(snake_case , r_idx - l_idx ) )
        lowercase = keep_order

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
        # compute logits, optionally projecting hidden states first
        if proj is None:
            lowercase = nn.functional.linear(snake_case , snake_case , bias=snake_case )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            lowercase = nn.functional.linear(snake_case , proj.t().contiguous() )
            lowercase = nn.functional.linear(snake_case , snake_case , bias=snake_case )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None , snake_case=False ):
        """Forward pass: with labels, return per-token negative log-likelihoods;
        without, return full log-probabilities over the vocabulary."""
        if labels is not None:
            # Shift so that tokens < n predict n
            lowercase = hidden[..., :-1, :].contiguous()
            lowercase = labels[..., 1:].contiguous()
            lowercase = hidden.view(-1 , hidden.size(-1 ) )
            lowercase = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            lowercase = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            # no clusters: a single plain softmax over the whole vocabulary
            lowercase = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                # -100 labels are ignored (standard padding convention)
                lowercase = labels != -100
                lowercase = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
                lowercase = (
                    -nn.functional.log_softmax(snake_case , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                lowercase = nn.functional.log_softmax(snake_case , dim=-1 )
        else:
            # construct weights and biases
            lowercase , lowercase = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    lowercase = self.out_layers[0].weight[l_idx:r_idx]
                    lowercase = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    lowercase = self.out_layers[i].weight
                    lowercase = self.out_layers[i].bias
                if i == 0:
                    # head softmax also scores the cluster tokens
                    lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(snake_case )
                biases.append(snake_case )
            lowercase , lowercase , lowercase = weights[0], biases[0], self.out_projs[0]
            lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
            lowercase = nn.functional.log_softmax(snake_case , dim=1 )
            if labels is None:
                lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                lowercase = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
            lowercase = 0
            lowercase = [0] + self.cutoffs
            for i in range(len(snake_case ) - 1 ):
                lowercase , lowercase = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    # gather only the positions whose label falls in this cluster
                    lowercase = (labels >= l_idx) & (labels < r_idx)
                    lowercase = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    lowercase = labels.index_select(0 , snake_case ) - l_idx
                    lowercase = head_logprob.index_select(0 , snake_case )
                    lowercase = hidden.index_select(0 , snake_case )
                else:
                    lowercase = hidden
                if i == 0:
                    # head cluster: probability comes directly from the head softmax
                    if labels is not None:
                        lowercase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        lowercase = head_logprob[:, : self.cutoffs[0]]
                else:
                    lowercase , lowercase , lowercase = weights[i], biases[i], self.out_projs[i]
                    lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
                    lowercase = nn.functional.log_softmax(snake_case , dim=1 )
                    lowercase = self.cutoffs[0] + i - 1 # No probability for the head cluster
                    if labels is not None:
                        # log p(token) = log p(cluster) + log p(token | cluster)
                        lowercase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        lowercase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        lowercase = logprob_i
                if labels is not None:
                    # scatter losses back to their original positions (or append in order)
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , snake_case , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out

    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        """Return full log-probabilities over the vocabulary for each hidden state."""
        if self.n_clusters == 0:
            lowercase = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(snake_case , dim=-1 )
        else:
            # construct weights and biases
            lowercase , lowercase = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    lowercase = self.out_layers[0].weight[l_idx:r_idx]
                    lowercase = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    lowercase = self.out_layers[i].weight
                    lowercase = self.out_layers[i].bias
                if i == 0:
                    lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(snake_case )
                biases.append(snake_case )
            lowercase , lowercase , lowercase = weights[0], biases[0], self.out_projs[0]
            lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
            lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            lowercase = nn.functional.log_softmax(snake_case , dim=1 )
            lowercase = [0] + self.cutoffs
            for i in range(len(snake_case ) - 1 ):
                lowercase , lowercase = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    lowercase = head_logprob[:, : self.cutoffs[0]]
                else:
                    lowercase , lowercase , lowercase = weights[i], biases[i], self.out_projs[i]
                    lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
                    lowercase = nn.functional.log_softmax(snake_case , dim=1 )
                    # NOTE(review): `head_logprob[:, -i]` looks suspicious — upstream
                    # indexes the cluster column explicitly; confirm before relying on it.
                    lowercase = head_logprob[:, -i] + tail_logprob_i
                    lowercase = logprob_i
            return out
| 84 | 0 |
from math import isqrt, loga
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below *max_number* (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Multiples below i**2 were already marked by smaller primes.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid-integers p^q * q^p (primes p < q) not exceeding base^degree.

    Works entirely in log2 space to avoid huge integers:
        p^q * q^p <= base^degree  <=>  q*log2(p) + p*log2(q) <= degree*log2(base).
    Since p >= 2, q*log2(p) <= bound implies q <= bound, so only primes up to
    int(bound) need to be sieved.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    # Two-pointer sweep: for each left prime, shrink right until the pair fits;
    # every prime between the pointers then also fits (bound is monotone).
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
    # Print the result for the default (base, degree) = (800800, 800800).
    print(f'''{solution() = }''')
| 658 |
from __future__ import annotations
class Matrix:
    """A matrix of ints/floats stored as a list of rows.

    Supports equality, negation, addition, subtraction, scalar and matrix
    multiplication, non-negative and negative integer powers, determinants,
    minors/cofactors, the adjugate and inversion.
    """

    def __init__(self, rows):
        # One shared error for every construction problem: ragged rows,
        # zero-width rows, or non-numeric values.
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        """Return the columns as a list of lists (rows of the transpose)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        """(rows, columns) pair."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        """Determinant by cofactor expansion along the first row.

        Returns 0 for non-square matrices and 1 for the 0x0 matrix.
        """
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns) )

    def is_invertable(self):
        """A matrix is invertible iff its determinant is non-zero."""
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the submatrix with *row* and *column* removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        """Signed minor at (row, column) — checkerboard sign pattern."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        """Matrix of minors."""
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ] )

    def cofactors(self):
        """Matrix of cofactors (minors with alternating signs)."""
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ] )

    def adjugate(self):
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        """Inverse via adjugate / determinant.

        Raises:
            TypeError: if the matrix is singular (kept from the original,
                although ValueError would be more conventional).
        """
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse' )
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            # NOTE(review): joins the characters of str(row) with ". " — kept
            # byte-for-byte from the original; verify this single-row
            # rendering is intended.
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row] ) + '.]'
                    for row in self.rows
                ] )
            + "]"
        )

    def add_row(self, row, position=None):
        """Append *row* (or insert at *position*); validates type and length."""
        type_error = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        """Append *column* (or insert at *position*); validates type and length."""
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ] )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ] )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            # NOTE(review): int() truncates float products — behavior kept
            # from the original implementation.
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows] )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix' )

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int' )
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power' )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                # A**-n == (A**-1)**n
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power' )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        """Dot product of two equal-length vectors."""
        return sum(row[i] * column[i] for i in range(len(row)))


# Backward-compatible alias for the auto-generated name this class carried.
A_ = Matrix
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 84 | 0 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
SCREAMING_SNAKE_CASE_ : List[Any] = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def SCREAMING_SNAKE_CASE ( snake_case ) -> Optional[int]:
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        # Inspect the installed `transformers` package instead of the local build dir.
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 375 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# Module logger.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example injected into the pipeline's __call__ docstring via
# `replace_example_docstring`.
EXAMPLE_DOC_STRING = '''
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images

        >>> image[0].save("cat.png")
        ```
'''

# Backward-compatible alias: the auto-generated file exposed the doc string
# under this name (the logger assignment was shadowed by it).
UpperCAmelCase = EXAMPLE_DOC_STRING
def downscale_height_and_width(height, width, scale_factor=8):
    """Map a requested output size to the nearest size compatible with the
    MoVQ latent grid.

    Each dimension is divided by scale_factor**2 (rounded up) and multiplied
    back by scale_factor, yielding the latent height/width used by the U-Net.

    Args:
        height: requested image height in pixels.
        width: requested image width in pixels.
        scale_factor: total spatial down-scaling of the MoVQ encoder.

    Returns:
        (new_height, new_width) tuple of latent-compatible sizes.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class A_(DiffusionPipeline):
    """Kandinsky 2.2 decoder pipeline: turns CLIP image embeddings produced by
    the prior pipeline into images.

    Components:
        unet: conditional U-Net that denoises the image latents.
        scheduler: DDPM-style scheduler used together with `unet`.
        movq: MoVQ VAE used to decode latents into images.
    """

    def __init__(self, unet, scheduler, movq):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq)
        # Total spatial down-scaling applied by the MoVQ encoder.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw (or validate user-supplied) initial latents, scaled by the
        scheduler's initial noise sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload submodules to CPU, moving each to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(F'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models via accelerate hooks; cheaper than sequential
        offload at a small memory cost."""
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(F'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the weights actually execute on (accounts for offload hooks)."""
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(UpperCAmelCase)  # module-level example doc string
    def __call__(self, image_embeds, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        """Run the reverse-diffusion loop on the image embeddings and decode
        the final latents with MoVQ.

        Returns an ImagePipelineOutput (or a bare tuple when return_dict is
        False) with images in the requested `output_type` ("pt", "np", "pil").
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            # Unconditional embeddings first, conditional second (split order
            # below relies on this).
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,)

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,)[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')

        if output_type in ["np", "pil"]:
            # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 84 | 0 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    '''Greedily concatenate consecutive (src, tgt) pairs into fewer, longer
    examples, never letting either side exceed *max_tokens* tokenized length.

    Args:
        tok: tokenizer callable used only to measure tokenized length.
        src_examples: list of source strings.
        tgt_examples: list of target strings (parallel to src_examples).
        max_tokens: maximum tokenized length for a packed example.

    Returns:
        (finished_src, finished_tgt) lists of packed strings.
    '''
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        # Tokenized length of the candidate string.
        return tok(strang, return_tensors='''pt''').input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ''' ''' + src
        cand_tgt = new_tgt + ''' ''' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup: flush the in-progress example
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    '''Pack the train split with `pack_examples`; copy val/test unchanged.

    Args:
        tok: tokenizer used to measure packed-example length.
        data_dir: pathlib.Path containing {split}.source / {split}.target files.
        max_tokens: maximum tokenized length per packed example.
        save_path: output directory (created if missing).
    '''
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(F"""packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.""")
        Path(save_path / F"""{split}.source""").open('''w''').write('''\n'''.join(packed_src))
        Path(save_path / F"""{split}.target""").open('''w''').write('''\n'''.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        shutil.copyfile(src_path, save_path / F"""{split}.source""")
        shutil.copyfile(tgt_path, save_path / F"""{split}.target""")
def packer_cli():
    '''CLI entry point: parse arguments, load the tokenizer, pack the data dir.'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''', type=str, help='''like facebook/bart-large-cnn,t5-base, etc.''')
    parser.add_argument('''--max_seq_len''', type=int, default=128)
    parser.add_argument('''--data_dir''', type=str)
    parser.add_argument('''--save_path''', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
    # Delegate to the CLI entry point defined above.
    packer_cli()
| 183 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of *number*.

    When digit_amount > 0 the fractional part is rounded to that many digits;
    otherwise it is returned at full floating-point precision. The sign of
    the fraction follows the sign of *number* (int() truncates toward zero).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demonstrate decimal isolation with and without rounding, for positive,
    # negative and zero inputs.
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 84 | 0 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data):
    """Split a scikit-learn Bunch/dict into its (features, targets) pair."""
    return (data["data"], data["target"])
def xgboost(features, target, test_features):
    """Fit an XGBoost regressor on (features, target) and predict for
    *test_features*.

    Returns:
        predictions reshaped to a column vector of shape (n_samples, 1).
    """
    # Fixed random_state keeps the demo deterministic.
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main():
    """Fetch the California housing dataset, train XGBoost, report errors."""
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f'Mean Absolute Error : {mean_absolute_error(y_test, predictions)}')
    print(f'Mean Square Error : {mean_squared_error(y_test, predictions)}')
if __name__ == "__main__":
    # Verify doctests (verbose) before running the training demo.
    import doctest
    doctest.testmod(verbose=True)
    main()
| 466 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True iff the string form of *n* reads the same reversed."""
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    """Project Euler 36: sum of all numbers below *n* that are palindromic
    in both base 10 and base 2.

    (Binary palindromes have no leading zeros by construction, since bin()
    never emits them.)
    """
    total = 0
    for i in range(1, n):
        # bin(i) is "0b...", so take the digits after the 'b'.
        if is_palindrome(i) and is_palindrome(bin(i).split('b')[1]):
            total += i
    return total
if __name__ == "__main__":
    # Read the limit from stdin and print the double-base palindrome sum.
    print(solution(int(str(input().strip()))))
| 84 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append(
        (
            f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
            f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
        )
    )
    rename_keys.append(
        (
            f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
            f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
        )
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ('input_proj.weight', 'input_projection.weight'),
        ('input_proj.bias', 'input_projection.bias'),
        ('query_embed.weight', 'query_position_embeddings.weight'),
        ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
        ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
        ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
        ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
        ('class_embed.weight', 'class_labels_classifier.weight'),
        ('class_embed.bias', 'class_labels_classifier.bias'),
        ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
        ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
        ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
        ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
        ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
        ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
    ]
)
def rename_key(state_dict, old, new):
    """Move the entry at key *old* to key *new*, mutating *state_dict* in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with backbone keys moved under the HF naming
    (`backbone.0.body` -> `backbone.conv_encoder.model`); other keys unchanged."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split each fused in_proj weight/bias into separate q/k/v projections,
    mutating *state_dict* in place (DETR-style conversion; hidden size 256)."""
    prefix = ''''''
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[F"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[F"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[F"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[F"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[F"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[F"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[F"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[F"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize *image* so its longer side is 800 px (detection checkpoint) or
    1000 px (structure checkpoint), preserving aspect ratio."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if '''detection''' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std normalization."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HF format,
    verify its outputs on a reference image, and optionally save/push it.

    Args:
        checkpoint_url: URL of the original (detection or structure) checkpoint.
        pytorch_dump_folder_path: output folder, or None to skip saving.
        push_to_hub: whether to push the converted model to the HF hub.
    """
    logger.info('''Converting model...''')
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='''resnet18''', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: '''table''', 1: '''table rotated'''}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    image_processor = DetrImageProcessor(
        format='''coco_detection''', max_size=800 if '''detection''' in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on a reference image hosted on the hub
    file_name = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id='''nielsr/example-pdf''', repo_type='''dataset''', filename=file_name)
    image = Image.open(file_path).convert('''RGB''')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''')
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
        type=str,
        choices=[
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
        ],
        help='URL of the Table Transformer checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 541 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger — the config class below calls `logger.info(...)`, so this name
# must exist at module level (it was previously bound to `UpperCAmelCase` only).
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/conditional-detr-resnet-50''': (
        '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
    ),
}
# Keep the previous binding so any external reference to the old name still works.
UpperCAmelCase = CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP
class A_ ( __lowerCamelCase ):
    """Configuration for the Conditional DETR model.

    Stores every hyper-parameter needed to instantiate the model: backbone
    selection, transformer encoder/decoder sizes, Hungarian-matcher costs and
    loss coefficients.

    NOTE(review): the previous version collapsed all 34 ``__init__`` parameters
    into the single name ``snake_case`` (a SyntaxError) and all attribute
    assignments into ``lowercase``; the names below are restored from the
    right-hand sides of those assignments.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # A timm backbone and an explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config, dict):
                # Re-hydrate a serialized backbone config via its registered class.
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # Mirrors encoder_layers for generic "num_hidden_layers" consumers.
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias used by generic model code; maps to the encoder's head count."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias used by generic model code; maps to the transformer width."""
        return self.d_model

    def to_dict(self):
        """Serialize this config (and any nested backbone config) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class A_ ( __lowerCamelCase ):
    """ONNX export configuration for Conditional DETR.

    NOTE(review): the three properties below previously all shared the name
    ``SCREAMING_SNAKE_CASE__`` so only the last survived; the standard OnnxConfig
    property names are restored.
    """

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self):
        """Input names with their dynamic axes."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset supporting all ops used by the model."""
        return 12
| 84 | 0 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
# Bug fix: the `Dict` annotation raised NameError (only List/Optional/Union are
# imported) and the class below uses `logger.warning`, so `logger` must exist.
snake_case__ = logger = logging.get_logger(__name__)
class _a ( __lowerCamelCase ):
    """Speech feature extractor producing log-mel filter-bank (MFSC) features.

    Extracts per-frame mel spectrogram features from raw mono audio and
    optionally normalizes them to zero mean / unit variance per utterance.

    NOTE(review): the previous version collapsed every parameter and local into
    ``_UpperCAmelCase``/``UpperCamelCase_`` (duplicate parameter names are a
    SyntaxError); the names below are restored from the attribute assignments.
    """

    model_input_names = ["""input_features""", """attention_mask"""]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length          # in milliseconds
        self.win_length = win_length          # in milliseconds
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        # Convert millisecond window/hop to sample counts.
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform):
        """Return (num_frames, feature_size) log-mel features for one waveform."""
        if self.win_function == "hamming_window":
            # Hamming windows are conventionally aperiodic for analysis frames.
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel='log', )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Mean/variance-normalize one feature matrix over its valid frames."""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            # Re-fill the padded tail so normalization does not disturb padding.
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        """Normalize a batch of feature matrices, using the mask for lengths."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        """Featurize and pad one waveform or a batch of waveforms."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            # Downcast double-precision input to the float32 the model expects.
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features} )
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features' )
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            # The mask is only meaningful when padding was actually applied.
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
| 23 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import bookkeeping. Bug fix: `_import_structure` was referenced below
# but the dict was bound to `UpperCAmelCase`, raising NameError at import time.
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( __lowerCamelCase , unittest.TestCase ):
    """Fast tests for the Kandinsky 2.2 prior pipeline using tiny dummy components.

    NOTE(review): method/attribute names were previously collapsed to
    ``UpperCamelCase__``/``_lowerCamelCase`` (duplicate parameter names made
    ``get_dummy_inputs`` a SyntaxError and the ``test_*`` methods were never
    discovered by unittest); canonical names are restored below.
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["""prompt"""]
    batch_params = ["""prompt""", """negative_prompt"""]
    required_optional_params = [
        """num_images_per_prompt""",
        """generator""",
        """num_inference_steps""",
        """latents""",
        """negative_prompt""",
        """guidance_scale""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 12,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor

    def get_dummy_components(self):
        """Assemble all tiny components the pipeline constructor expects."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="""fixed_small_log""", prediction_type="""sample""", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )
        components = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """scheduler""": scheduler,
            """image_processor""": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on the given device."""
        if str(device).startswith("""mps""" ):
            # MPS generators must be created on CPU.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == """cpu"""
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == """cpu"""
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 617 |
def topological_sort(graph):
    """Kahn's algorithm over an adjacency-list graph with vertices 0..n-1.

    Prints the topological order (or "Cycle exists") to preserve the original
    behavior, and additionally returns the order as a list, or None on a cycle.
    All locals had been collapsed into a single name, losing the algorithm.
    """
    indegree = [0] * len(graph)
    # deque gives O(1) pops from the left (list.pop(0) is O(n)).
    queue = deque()
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.popleft()
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print('Cycle exists' )
        return None
    print(topo)
    return topo


# Backwards-compatible alias for the previous (obfuscated) function name.
UpperCAmelCase_ = topological_sort
# Adjacency List of Graph
# Bug fix: the dict was bound to `UpperCAmelCase` while the call used `graph`.
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 84 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    """Builds tiny DPR configs/inputs and checks the three TF-DPR model heads.

    NOTE(review): renamed from the shadowed ``A`` — the test class below
    instantiates ``TFDPRModelTester(self)``, which did not exist. Duplicate
    ``lowerCamelCase__`` parameter names (a SyntaxError) are also restored.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        """Return a small DPRConfig plus random ids/masks/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        # DPR wraps a BERT encoder config and adds a projection head size.
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test mixin: only input_ids are required."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(__lowerCamelCase, __lowerCamelCase, unittest.TestCase):
    """Common + model-specific tests for the TF DPR encoders and reader.

    NOTE(review): renamed from ``A`` (three classes shared that name, so only
    one survived import) and the ``test_*`` method names are restored so
    unittest actually discovers them.
    """

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
    # Behaviors not supported / not relevant for DPR in TF.
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released DPR question encoder weights."""

    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 325 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import bookkeeping. Bug fix: the dict and optional lists were bound to
# `UpperCAmelCase` (each assignment overwriting the last) while `_LazyModule`
# below received the undefined name `_import_structure`.
_import_structure = {
    '''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        '''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTNeoForCausalLM''',
        '''GPTNeoForQuestionAnswering''',
        '''GPTNeoForSequenceClassification''',
        '''GPTNeoForTokenClassification''',
        '''GPTNeoModel''',
        '''GPTNeoPreTrainedModel''',
        '''load_tf_weights_in_gpt_neo''',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        '''FlaxGPTNeoForCausalLM''',
        '''FlaxGPTNeoModel''',
        '''FlaxGPTNeoPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84 | 0 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# Bug fix: the `List[Any]` annotation raised NameError (typing is not imported
# here) and the conversion function uses `logger`, which was never bound.
A_ = logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> int:
    """Copy weights from an old-structure ProphetNet checkpoint into the new model.

    Loads the checkpoint with both the legacy and current model classes, then
    walks every key the new model reported as missing and copies the matching
    tensor from the legacy model, finally saving the converted model.

    NOTE(review): the original had duplicate parameter names (a SyntaxError)
    and every assignment collapsed into ``UpperCamelCase_``; the structure is
    restored from the surviving right-hand sides and control flow.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    # Fused-attention projections need special slicing from in_proj_weight.
    special_keys = ['key_proj', 'value_proj', 'query_proj']
    # New attribute name -> legacy attribute name.
    mapping = {
        'self_attn': 'ngram_self_attn',
        'cross_attn': 'encoder_attn',
        'cross_attn_layer_norm': 'encoder_attn_layer_norm',
        'feed_forward_layer_norm': 'final_layer_norm',
        'feed_forward': '',
        'intermediate': 'fc1',
        'output': 'fc2',
        'key_proj': 'k_proj',
        'query_proj': 'q_proj',
        'value_proj': 'v_proj',
        'word_embeddings': 'embed_tokens',
        'embeddings_layer_norm': 'emb_layer_norm',
        'relative_pos_embeddings': 'relative_linear',
        'ngram_embeddings': 'ngram_input_embed',
        'position_embeddings': 'embed_positions',
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split('.')
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            # Translate the new attribute name into the legacy one when needed.
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F'''{attribute} is initialized.''')
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F'''{attribute} is initialized''')
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, 'in_proj_weight'):
                # The legacy model stores q/k/v stacked in one in_proj tensor.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # Bug fix: these were bare comparison expressions (no-ops);
                # they are clearly meant to be shape assertions.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :])
                is_key_init = True
                break
            # Descend one level on both models for the next path component.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(F'''{old_model} does not have {old_attribute}''')
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(F'''{key} was not correctly initialized!''')
    print(F'''Saving model to {pytorch_dump_folder_path}''')
    prophet.save_pretrained(pytorch_dump_folder_path)


# Backwards-compatible alias for the previous (obfuscated) function name.
snake_case = convert_prophetnet_checkpoint_to_pytorch
if __name__ == "__main__":
    # CLI entry point. Bug fix: the parser/args objects were assigned to `A_`
    # while the code referenced `parser` and `args` (NameError), and stray
    # table-cell residue was fused onto the final call line.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """
    Output of the Flax ControlNet model.

    Fields (names restored to match the keyword arguments used when this class is
    instantiated later in this file):
    """

    # Per-down-block residual feature maps, scaled by `conditioning_scale`.
    down_block_res_samples: jnp.ndarray
    # Residual from the mid block, scaled by `conditioning_scale`.
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    """
    Embeds the conditioning image (e.g. edges, depth) into the UNet's feature
    space: a conv-in, a stack of stride-1 / stride-2 conv pairs, and a
    zero-initialized conv-out.

    Class renamed to match the reference `FlaxControlNetConditioningEmbedding(...)`
    call later in this file; `setup` attribute assignments were lost in the
    obfuscation (`lowercase = ...`) and are restored here.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32  # `jnp.floataa` does not exist; float32 restored

    def setup(self):
        # Flax requires the literal method name `setup` for lazy module construction.
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks
        # Zero-init so the ControlNet starts as an identity perturbation.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class A_(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""
    Flax ControlNet: a copy of the UNet encoder whose down/mid residuals are passed
    through zero-initialized 1x1 convolutions and returned for injection into a
    frozen UNet.

    The obfuscation collapsed all config fields to `_UpperCamelCase` and all
    `setup` attribute assignments to `lowercase = ...`; both are restored from the
    attribute names the methods themselves read (`self.sample_size`,
    `self.conv_in`, `self.down_blocks`, ...). Bases restored to
    `FlaxModelMixin, ConfigMixin` (both imported at the top of this file).
    """

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32  # was the nonexistent `jnp.floataa`
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng):
        """Initialize parameters with dummy inputs; returns the `params` FrozenDict."""
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        # conditioning image is RGB at 8x the latent resolution
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`, to stay backwards-compatible with
        # configurations that used the historical (misnamed) field.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]

        # one zero conv per residual, starting with the conv_in output
        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding='VALID',
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding='VALID',
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding='VALID',
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding='VALID',
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False):
        """
        Run the ControlNet; returns (down residuals, mid residual), each scaled by
        `conditioning_scale`, either as a tuple or a `FlaxControlNetOutput`.
        """
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process: NCHW -> NHWC for Flax convs
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks: zero convs on every collected residual
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [res * conditioning_scale for res in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 84 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Standard transformers lazy-import scaffold: `_import_structure` maps submodule
# name -> public symbols so heavy backends (torch/flax) are imported only when
# accessed. The obfuscation bound everything to `lowerCAmelCase__`, leaving the
# `_import_structure` referenced at the bottom undefined; names restored.
_import_structure = {
    'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_gpt_neo'] = [
        'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTNeoForCausalLM',
        'GPTNeoForQuestionAnswering',
        'GPTNeoForSequenceClassification',
        'GPTNeoForTokenClassification',
        'GPTNeoModel',
        'GPTNeoPreTrainedModel',
        'load_tf_weights_in_gpt_neo',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_gpt_neo'] = [
        'FlaxGPTNeoForCausalLM',
        'FlaxGPTNeoModel',
        'FlaxGPTNeoPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): the obfuscation destroyed the assignment target here; upstream
# accelerate test scripts set an environment flag to "true" at this point
# (e.g. os.environ[...] = "true") — confirm the intended target before relying
# on this module-level constant.
UpperCAmelCase = '''true'''
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Build a RegressionModel plus an accelerator-prepared copy and dataloader.

    Name restored to match the `get_basic_setup(...)` call site below; the
    obfuscated signature repeated one parameter name three times (a SyntaxError).
    Returns (raw model, prepared ddp model, prepared dataloader).
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """Return a GLUE/MRPC validation dataloader tokenized for the test model.

    `use_longest` switches the collator between dynamic ("longest") padding and
    fixed max_length padding. Name restored to match the call site in
    `get_mrpc_setup`.
    """
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(examples):
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        # map once on the main process so the cache is shared
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Build baseline ("no") and distributed ("ddp") MRPC evaluation setups.

    Returns ({"ddp": [...], "no": [...]}, accelerator); each list is
    [model, dataloader, device].
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run `model` over `dataloader`, gather per-batch (logit, target) pairs across
    processes via `gather_for_metrics`, and return them concatenated.
    """
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check that `gather_for_metrics` yields exactly `num_samples` predictions
    (i.e. duplicated padding samples are dropped)."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), F'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'''
def test_mrpc(dispatch_batches=False, split_batches=False):
    """Compare MRPC metrics computed on one process vs. gathered across processes;
    they must match to within float tolerance for both accuracy and f1."""
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    """Drive the gather_for_metrics tests over all split/dispatch combinations."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs): entry point invoked per TPU core with its index.
    main()


if __name__ == "__main__":
    main()
| 84 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)  # used as `logger` throughout this file

# fairseq wav2vec2 -> HF parameter-name mapping consumed by
# recursively_load_weights_wavaveca; "*" is replaced by the layer index.
MAPPING = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
# Keys that live at the top level of the HF model rather than under the encoder.
TOP_LEVEL_KEYS = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` ("a.b.c") inside `hf_pointer` and copy `value` into the matching
    weight tensor, checking shapes first.

    Name restored to match the `set_recursively(...)` call site; the obfuscation
    had also collapsed the `hf_pointer.<attr>.data = value` assignments.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy fairseq wav2vec2 encoder weights into `hf_model` using MAPPING.

    Returns the fairseq `proj` layer (encoder->decoder projection) so the caller
    can graft it onto the encoder-decoder model. Name restored to match the call
    site in `convert_wavaveca_checkpoint`.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # layer index sits just before the matched key in the name
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-layer tensor into the HF feature extractor.

    type_id 0 = conv weight/bias; type_id 2 = layer norm (only for layer 0 when
    group norm is used). Anything else is recorded in `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Return a bias-free nn.Linear whose weight is tied to the embedding's weight
    (standard output-projection construction for tied embeddings)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Build a token->id vocab from a fairseq dict file (one "token count" per
    line), reserving ids 0-3 for the special tokens."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Convert a fairseq wav2vec2 seq2seq checkpoint into a HF
    SpeechEncoderDecoderModel (wav2vec2 encoder + speech_to_text_2 decoder) and
    save model, tokenizer and feature extractor to `pytorch_dump_folder_path`.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=1_6_0_0_0, padding_value=0, do_normalize=True, return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(
        model.decoder.state_dict(), strict=False
    )

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # The parser/args were bound to `lowerCAmelCase__` while the following lines
    # referenced the undefined `parser` and `args`; names restored.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument(
        '''--encoder_config_path''',
        default='''facebook/wav2vec2-large-lv60''',
        type=str,
        help='''Path to hf encoder wav2vec2 checkpoint config''',
    )
    parser.add_argument(
        '''--decoder_config_path''',
        default='''facebook/s2t-small-mustc-en-fr-st''',
        type=str,
        help='''Path to hf decoder s2t checkpoint config''',
    )
    parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''')
    parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 645 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[str] = ["""image_processor""", """tokenizer"""]
_UpperCamelCase : Any = """OwlViTImageProcessor"""
_UpperCamelCase : Dict = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , snake_case=None , snake_case=None , **snake_case ):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case , )
lowercase = kwargs.pop('feature_extractor' )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case , snake_case )
def __call__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="max_length" , snake_case="np" , **snake_case ):
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(snake_case , snake_case ) or (isinstance(snake_case , snake_case ) and not isinstance(text[0] , snake_case )):
lowercase = [self.tokenizer(snake_case , padding=snake_case , return_tensors=snake_case , **snake_case )]
elif isinstance(snake_case , snake_case ) and isinstance(text[0] , snake_case ):
lowercase = []
# Maximum number of queries across batch
lowercase = max([len(snake_case ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(snake_case ) != max_num_queries:
lowercase = t + [' '] * (max_num_queries - len(snake_case ))
lowercase = self.tokenizer(snake_case , padding=snake_case , return_tensors=snake_case , **snake_case )
encodings.append(snake_case )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
lowercase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowercase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowercase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
lowercase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowercase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
lowercase = BatchEncoding()
lowercase = input_ids
lowercase = attention_mask
if query_images is not None:
lowercase = BatchEncoding()
lowercase = self.image_processor(
snake_case , return_tensors=snake_case , **snake_case ).pixel_values
lowercase = query_pixel_values
if images is not None:
lowercase = self.image_processor(snake_case , return_tensors=snake_case , **snake_case )
if text is not None and images is not None:
lowercase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowercase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case ) , tensor_type=snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.image_processor.post_process(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.image_processor.post_process_object_detection(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.image_processor.post_process_image_guided_detection(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.tokenizer.batch_decode(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.tokenizer.decode(*snake_case , **snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case , )
return self.image_processor
| 84 | 0 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( PipelineTesterMixin, unittest.TestCase ):
    """Fast CPU tests for the DDIM pipeline using a tiny UNet.

    NOTE(review): the obfuscated original used an undefined base class
    (`__lowerCamelCase`) and gave every method the same name, breaking the
    `self.get_dummy_components()` / `self.get_dummy_inputs()` call sites and
    the `super().test_*` forwards; names restored from those call sites.
    """

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the flag name was destroyed by obfuscation; `test_cpu_offload`
    # matches the upstream test for this pipeline — confirm against history.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a deterministic tiny UNet + DDIM scheduler pair."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Pipeline kwargs with a generator seeded for `device`."""
        # MPS does not support device-bound generators; fall back to the CPU one.
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3 )

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests for DDIM against pretrained checkpoints.

    NOTE(review): both methods originally shared one obfuscated name (the
    second silently shadowed the first) and read undefined names; locals and
    `test_*` names restored — confirm method names against upstream.
    """

    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNetaDModel.from_pretrained(model_id)
        # This checkpoint ships its own scheduler config.
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 658 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
# Module-level logger (obfuscation had renamed it to the same name as the
# constants below, which then overwrote it).
logger = logging.get_logger(__name__)
# Tokenizer resource tables. The obfuscated original assigned all three dicts
# to the SAME name (each overwriting the previous) while the tokenizer class
# below reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — names restored from those reads.
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}
# Maximum input lengths (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/blenderbot_small-90M': 512,
}
class A_ ( PreTrainedTokenizerFast ):
    """Fast BlenderbotSmall tokenizer backed by a byte-level BPE model.

    NOTE(review): the obfuscated original had duplicate `snake_case` parameter
    names in `__init__` (a SyntaxError), colliding class-attribute/method
    names, and an undefined base (`__lowerCamelCase`); restored from the
    constants/imports this file defines and the names the base class expects.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return an all-zero token-type mask (segment ids are unused here)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 84 | 0 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint to a PyTorch model on disk.

    Renamed from the obfuscated `SCREAMING_SNAKE_CASE` to match the call at
    the bottom of this script; the original also had three identical
    parameter names (a SyntaxError) and clobbered every local.
    """
    # Construct model: default config unless an explicit config file is given.
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # CLI entry point. The obfuscated original assigned the parser and the
    # parsed namespace to throwaway names while reading `parser`/`args`
    # (NameError), and read `args.gpta_*` although argparse derives the
    # attribute names from the "--gpt2_*" flags below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 375 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """Builds tiny OpenAI-GPT configs/inputs and runs per-head shape checks.

    NOTE(review): renamed from the obfuscated `A_` — the test class below
    instantiates `OpenAIGPTModelTester(self)`. Duplicate parameter names
    (SyntaxError) and clobbered locals restored from the body's reads.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Last vocab id doubles as the padding id in these tests.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-suite tests for the OpenAI-GPT model family.

    NOTE(review): bases were the undefined `__lowerCamelCase`; class
    attributes and method names collided under obfuscation. Restored to the
    mixin hook names (`all_model_classes`, `_prepare_for_class`, `setUp`, …)
    the imported test mixins call.
    """

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                # Double-heads model expects (batch, num_choices, seq) inputs plus
                # multiple-choice token ids/labels.
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class A_ ( unittest.TestCase ):
    """Slow greedy-generation regression test for OpenAI-GPT.

    NOTE(review): the obfuscated original read undefined names
    (`snake_case`) where `torch_device`, the input tensor, and `do_sample`
    belong; restored from the inline comments and surrounding reads.
    """

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            4_0477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        # Greedy decoding must reproduce the pinned continuation exactly.
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 84 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """Builds tiny Funnel configs/inputs and runs shape checks per TF head.

    NOTE(review): renamed from the obfuscated `_UpperCAmelCase` — the test
    classes below instantiate `TFFunnelModelTester(self)`. The original
    signature repeated the parameter name `lowerCamelCase` (a SyntaxError)
    and clobbered every local; names restored from the bodies' reads.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = FunnelConfig(
            vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )
        # The shape checks must also hold without sequence truncation and
        # without a separate [CLS] treatment.
        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )
        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )

    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model) )
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model) )

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length) )

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        # Duplicate each sample across the choice dimension.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1 ), (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1 ), (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1 ), (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-suite tests for the decoder-equipped TF Funnel heads.

    NOTE(review): bases were the undefined `__lowerCamelCase`; class
    attributes and method names collided under obfuscation. Restored to the
    hook names the imported mixins call.
    """

    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class _UpperCAmelCase ( TFModelTesterMixin , unittest.TestCase ):
    """Common-suite tests for the base (no-decoder) TF Funnel variants.

    NOTE(review): base was the undefined `__lowerCamelCase`; attribute and
    method names restored to the mixin hook names.
    """

    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # base=True selects the decoder-less expected layer counts in the tester.
        self.model_tester = TFFunnelModelTester(self, base=True )
        self.config_tester = ConfigTester(self, config_class=FunnelConfig )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 183 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the ViT-MSN model.
#
# Fixes from the original: every assignment targeted the throwaway name
# ``UpperCAmelCase`` (so the torch-only list overwrote the dict instead of
# being stored under its module key), ``_LazyModule`` was called with the
# undefined name ``_import_structure``, and the lazy module was never
# installed into ``sys.modules``.
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the ConvNext model family.
#
# Fixes from the original: every assignment targeted the throwaway name
# ``__lowerCAmelCase`` (each backend list overwrote the previous binding
# instead of being stored under its module key), and ``_LazyModule`` was
# called with the undefined name ``_import_structure`` and never installed
# into ``sys.modules``.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 466 |
import math
def prime_sieve(n):
    """Return all primes strictly below ``n`` (``n`` must be >= 3).

    Odd-only sieve of Eratosthenes. The original code collapsed every local
    into one name (destroying the sieve array) and both functions in this
    chunk shared one name while the call sites used ``prime_sieve`` and
    ``solution``; those intended names are restored here.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # Only odd i need sieving; even composites are never appended below.
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit=9999_6666_3333):
    """Project Euler 234: sum of all semidivisible numbers not exceeding ``limit``.

    A number n with lps(n)^2 <= n <= ups(n)^2 is semidivisible when exactly one
    of lps(n)/ups(n) (largest/smallest prime below/above sqrt(n)) divides it.
    For each consecutive prime pair the multiples of each prime in range are
    summed, then numbers divisible by both are removed twice.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Add the numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound so the ups multiples start within the limit
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current), walking downwards
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps (added twice above)
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number until it enters the range
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            current += last_prime * next_prime

        # Setup for next prime pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
    # Script entry point: print the answer for the default Project Euler limit.
    print(solution())
| 84 | 0 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n):
    """Return True if the decimal representation of ``n`` reads the same
    forwards and backwards.

    Restored from the original, whose parameter was never used and whose body
    referenced the undefined name ``__SCREAMING_SNAKE_CASE``; the name
    ``is_palindrome`` is what the caller below uses.
    """
    s = str(n)
    return s == s[::-1]


def solution(limit=1_000_000):
    """Project Euler 36: sum of all numbers below ``limit`` that are
    palindromic in both base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        # bin(i) is "0b...", so take the part after the "b" prefix.
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
    # Script entry point: read a limit from stdin and print the answer.
    print(solution(int(str(input().strip()))))
| 541 |
import collections
import os
import re
from pathlib import Path
UpperCAmelCase = '''src/transformers'''
# Matches is_xxx_available()
UpperCAmelCase = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
UpperCAmelCase = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
UpperCAmelCase = re.compile(R'''^\s*try:''')
# Catches a line with else:
UpperCAmelCase = re.compile(R'''^\s*else:''')
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init.

    Returns the sorted backends joined with "_and_", or None when the line is
    not an ``if not is_xxx_available()`` guard. Restored from an obfuscated
    version whose locals and name collapsed into ``UpperCAmelCase_``; the name
    ``find_backend`` is what the other functions in this file call.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse a lazy-init file and return its two object inventories.

    Returns ``(import_dict_objects, type_hint_objects)`` — dicts mapping a
    backend name (or "none") to the object names declared in the
    ``_import_structure`` half and the ``TYPE_CHECKING`` half respectively —
    or None for a traditional (non-lazy) init. Restored from an obfuscated
    version where every local shared one name, which lost the inventories.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of a lazy init and return a list of error strings.

    Checks that both halves declare the same backends, contain no duplicates,
    and list the same objects per backend. Restored from an obfuscated version
    in which the error list and every temporary shared one local name.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk PATH_TO_TRANSFORMERS and validate every lazy ``__init__.py``.

    Raises ValueError listing all files whose ``_import_structure`` and
    ``TYPE_CHECKING`` halves disagree. Restored from an obfuscated version in
    which the failure accumulator was immediately overwritten by other locals.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file for context.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the dotted names of all submodules under PATH_TO_TRANSFORMERS.

    Collects non-private package folders containing Python files, plus
    top-level single-file modules (excluding ``__init__.py``). Restored from
    an obfuscated version whose locals all shared one name.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules (no dot in the resulting name).
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are intentionally absent from the main `_import_structure`.
# The original bound this list to the throwaway name ``UpperCAmelCase`` while
# check_submodules() reads it as ``IGNORE_SUBMODULES``.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Verify every submodule is registered in the main init's _import_structure.

    Raises ValueError listing any submodule missing from the registry (except
    those in IGNORE_SUBMODULES). Restored from an obfuscated version whose
    locals all collapsed to one name.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    # Run both repo-consistency checks when executed as a script.
    check_all_inits()
    check_submodules()
| 84 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the OWL-ViT model.
#
# Fixes from the original: every assignment targeted a throwaway
# ``snake_case__`` name (so the backend lists overwrote each other instead of
# being stored under their module keys), and ``_LazyModule`` was called with
# the undefined name ``_import_structure`` without being installed into
# ``sys.modules``.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    """A singly-linked node holding one stack item.

    Restored from an obfuscated version where both classes in this file were
    named ``A_`` (shadowing each other), ``T`` was undefined, and attribute
    assignments were lost to a collapsed local name.
    """

    def __init__(self, data: T):
        self.data = data
        # Link to the node beneath this one on the stack.
        self.next: "Node[T] | None" = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """LIFO stack backed by a singly linked list (top == head)."""

    def __init__(self) -> None:
        self.top: "Node[T] | None" = None

    def __iter__(self) -> Iterator[T]:
        # Yield items from top to bottom.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        """Return True when the stack holds no items."""
        return self.top is None

    def push(self, item: T) -> None:
        """Place *item* on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top item; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the top item without removing it; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop all items from the stack."""
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 84 | 0 |
"""simple docstring"""
def _A ( _a : Tuple ):
"""simple docstring"""
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
A = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 617 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    """Builds tiny random LlamaConfig/input batches and runs shared checks.

    Restored from an obfuscated version in which ``__init__`` bound every
    argument to one throwaway local (storing nothing on ``self``) and all
    method locals collapsed into a single name. The class name is grounded by
    the ``LlamaModelTester(self)`` call in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels) built from random tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build a small LlamaConfig from the tester's hyper-parameters."""
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the obfuscated source passed an undefined name here;
            # restored to False (encoder-style default) — confirm upstream.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check that cached decoding matches the no-cache forward pass."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test wiring for the Llama models.

    Restored from an obfuscated version whose mixin bases were the undefined
    name ``__lowerCamelCase`` (restored to the three mixins this file
    imports), whose class attributes and methods shared duplicated names, and
    whose method locals all collapsed into a single name.
    """

    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the obfuscated source had two identically named False
    # flags; restored to the usual pair — confirm against the mixin's API.
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        """RoPE scaling must change long-input outputs; dynamic scaling must
        leave short inputs (within the original max length) unchanged."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
@slow
def test_model_7b_logits(self):
    """Compare the 7B checkpoint's logits against frozen reference values.

    Restored from an obfuscated version whose locals all shared one name;
    the duplicated method name is replaced so siblings no longer shadow it.
    """
    input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
    model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
    out = model(torch.tensor([input_ids]))
    # Expected mean on dim = -1
    EXPECTED_MEAN = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]])
    torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
    # slicing logits[0, 0, 0:30]
    # fmt: off
    EXPECTED_SLICE = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,])
    # fmt: on
    torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
lowercase = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowercase = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is curently gated' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowercase = 'Simply put, the theory of relativity states that '
lowercase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowercase = tokenizer.encode(snake_case , return_tensors='pt' )
lowercase = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=snake_case )
# greedy generation outputs
lowercase = model.generate(snake_case , max_new_tokens=64 , top_p=snake_case , temperature=1 , do_sample=snake_case )
lowercase = tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case )
self.assertEqual(snake_case , snake_case )
| 84 | 0 |
'''simple docstring'''
def _A ( lowercase__ = 50 ):
lowercase__ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 325 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class A_ :
    """Mock download manager used by the datasets dummy-data tests.

    Resolves URLs to files inside a local ``dummy_data.zip`` (or its copy
    hosted on GitHub) instead of performing real downloads, mirroring the
    DownloadManager API: download/extract entry points, dummy-data path
    resolution, and archive/file iteration helpers.

    NOTE(review): the obfuscated signatures below repeat the parameter name
    ``snake_case`` (duplicate parameter names are a SyntaxError in Python) and
    assign results to a throwaway ``lowercase`` local, while later code reads
    ``self.*`` attributes that are never set here.  The original parameter and
    attribute names must be restored before this class can run; the intended
    names are visible at the use sites (e.g. ``self.dataset_name``,
    ``self.dummy_file``, ``self.download_and_extract``).
    """

    # Class-level defaults.  NOTE(review): all three share the obfuscated name
    # `_UpperCamelCase`, so only the last assignment survives; the strings
    # suggest a dummy-data folder name and a scripts directory name.
    _UpperCamelCase : Dict = """dummy_data"""
    _UpperCamelCase : Optional[int] = """datasets"""
    _UpperCamelCase : Tuple = False
    def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = False , snake_case = True , snake_case = None , ):
        """Record dataset name, config, version and dummy-data loading options.

        NOTE(review): the body reads ``dataset_name`` / ``cache_dir`` /
        ``use_local_dummy_data`` / ``config`` / ``download_callbacks`` /
        ``load_existing_dummy_data``, none of which are bound by the
        (duplicated) parameter list above.
        """
        lowercase = 0
        lowercase = dataset_name
        lowercase = cache_dir
        lowercase = use_local_dummy_data
        lowercase = config
        # download_callbacks take a single url as input
        lowercase = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        lowercase = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        lowercase = str(snake_case )
        # to be downloaded
        lowercase = None
        lowercase = None
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Lazily download the dummy data once and return its cached path."""
        if self._dummy_file is None:
            lowercase = self.download_dummy_data()
        return self._dummy_file
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Relative folder of the dummy data for the current config/version."""
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('dummy' , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join('dummy' , self.version_name )
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Path of the dummy_data.zip archive inside the dummy-data folder."""
        return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Fetch and extract the dummy zip (local checkout or GitHub) and return its path."""
        lowercase = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        lowercase = cached_path(
            snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
        return os.path.join(snake_case , self.dummy_file_name )
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Path of the dummy zip inside the local datasets-scripts checkout."""
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """GitHub raw URL of the dummy zip (computed once, then cached)."""
        if self._bucket_url is None:
            lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
        return self._bucket_url
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Directory containing the dummy data (the file's parent when it is a file)."""
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
        """Map a url / list / dict of urls onto paths inside the dummy data."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            lowercase = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            lowercase = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(snake_case , snake_case ):
            return self.create_dummy_data_dict(snake_case , snake_case )
        elif isinstance(snake_case , (list, tuple) ):
            return self.create_dummy_data_list(snake_case , snake_case )
        else:
            return self.create_dummy_data_single(snake_case , snake_case )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
        """`download` API: delegates to download_and_extract."""
        return self.download_and_extract(snake_case )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        """`download_custom` API: the custom callable is ignored for dummy data."""
        return self.download_and_extract(snake_case )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case , **snake_case ):
        """`extract` API: dummy data is pre-extracted, so the path is returned as-is."""
        return path
    def SCREAMING_SNAKE_CASE__ ( self ):
        """`get_recorded_sizes_checksums` API: nothing is recorded for dummy data."""
        return {}
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        """Build {key: dummy-path} for a dict of urls, disambiguating clashing names."""
        lowercase = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(snake_case , snake_case ):
                    for single_url in single_urls:
                        download_callback(snake_case )
                else:
                    lowercase = single_urls
                    download_callback(snake_case )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(snake_case , snake_case ):
                lowercase = [os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) ) for x in single_urls]
            else:
                lowercase = single_urls
                lowercase = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
            lowercase = value
        # make sure that values are unique
        if all(isinstance(snake_case , snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            lowercase = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        """Build a list of dummy paths for a list of urls (sharded files collapse to one)."""
        lowercase = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , snake_case ) ) for url in data_url )
        lowercase = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            lowercase = [data_url[0]] * len(snake_case )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(snake_case )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            lowercase = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
            dummy_data_list.append(snake_case )
        return dummy_data_list
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        """Map one url onto its dummy path (with a legacy fallback for old layouts)."""
        for download_callback in self.download_callbacks:
            download_callback(snake_case )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        lowercase = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
        if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def SCREAMING_SNAKE_CASE__ ( self ):
        """`delete_extracted_files` API: a no-op for dummy data."""
        pass
    def SCREAMING_SNAKE_CASE__ ( self ):
        """`manage_extracted_files` API: a no-op for dummy data."""
        pass
    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        """Yield (relative-posix-path, file-object) pairs for an archive's members."""
        def _iter_archive_members(snake_case ):
            # this preserves the order of the members inside the ZIP archive
            lowercase = Path(self.dummy_file ).parent
            lowercase = path.relative_to(snake_case )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                lowercase = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(snake_case )
        lowercase = Path(snake_case )
        lowercase = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob('*' )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
                yield file_path.relative_to(snake_case ).as_posix(), file_path.open('rb' )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        """Yield file paths under the given path(s), skipping hidden/dunder entries."""
        if not isinstance(snake_case , snake_case ):
            lowercase = [paths]
        for path in paths:
            if os.path.isfile(snake_case ):
                if os.path.basename(snake_case ).startswith(('.', '__') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(snake_case ):
                    if os.path.basename(snake_case ).startswith(('.', '__') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(snake_case ):
                        if filename.startswith(('.', '__') ):
                            continue
                        yield os.path.join(snake_case , snake_case )
| 84 | 0 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class _lowerCAmelCase:
    """Harness that builds small MPNet configs/inputs and checks each head's output shapes.

    NOTE(review): the constructor's parameters were all obfuscated to the single
    name ``_lowerCamelCase`` (duplicate parameter names are a SyntaxError) and
    the assignments below read names such as ``parent`` / ``batch_size`` that
    the signature no longer binds — the original parameter list must be
    restored before this harness can run.
    """
    def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=9_9 , _lowerCamelCase=6_4 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=6_4 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=1_6 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
        """Store the hyper-parameters used to generate dummy MPNet inputs."""
        UpperCamelCase_: Optional[int] = parent
        UpperCamelCase_: List[str] = batch_size
        UpperCamelCase_: Dict = seq_length
        UpperCamelCase_: int = is_training
        UpperCamelCase_: str = use_input_mask
        UpperCamelCase_: List[Any] = use_token_type_ids
        UpperCamelCase_: Dict = use_labels
        UpperCamelCase_: Dict = vocab_size
        UpperCamelCase_: int = hidden_size
        UpperCamelCase_: Tuple = num_hidden_layers
        UpperCamelCase_: Optional[Any] = num_attention_heads
        UpperCamelCase_: Union[str, Any] = intermediate_size
        UpperCamelCase_: Tuple = hidden_act
        UpperCamelCase_: Dict = hidden_dropout_prob
        UpperCamelCase_: Union[str, Any] = attention_probs_dropout_prob
        UpperCamelCase_: Tuple = max_position_embeddings
        UpperCamelCase_: str = type_vocab_size
        UpperCamelCase_: Union[str, Any] = type_sequence_label_size
        UpperCamelCase_: int = initializer_range
        UpperCamelCase_: Optional[int] = num_labels
        UpperCamelCase_: Tuple = num_choices
        UpperCamelCase_: Optional[int] = scope
    def _a ( self ):
        """Return the real pretrained base config (microsoft/mpnet-base)."""
        return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
    def _a ( self ):
        """Build random ids/masks/labels plus a config for one forward pass."""
        UpperCamelCase_: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase_: List[str] = None
        if self.use_input_mask:
            UpperCamelCase_: Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase_: Optional[int] = None
        UpperCamelCase_: List[str] = None
        UpperCamelCase_: int = None
        if self.use_labels:
            UpperCamelCase_: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase_: int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase_: List[str] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase_: Optional[int] = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _a ( self ):
        """Build a small MPNetConfig from the stored hyper-parameters."""
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
        """Check the base model's last_hidden_state and pooler_output shapes."""
        UpperCamelCase_: List[Any] = MPNetModel(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        UpperCamelCase_: Any = model(_lowerCamelCase , _lowerCamelCase )
        UpperCamelCase_: str = model(_lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
        """Check the question-answering head's start/end logits shapes."""
        UpperCamelCase_: List[str] = MPNetForQuestionAnswering(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        UpperCamelCase_: Tuple = model(
            _lowerCamelCase , attention_mask=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
        """Check the sequence-classification head's logits shape."""
        UpperCamelCase_: List[str] = self.num_labels
        UpperCamelCase_: Optional[int] = MPNetForSequenceClassification(_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        UpperCamelCase_: Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
        """Check the multiple-choice head's logits shape (inputs tiled per choice)."""
        UpperCamelCase_: Dict = self.num_choices
        UpperCamelCase_: Tuple = MPNetForMultipleChoice(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        UpperCamelCase_: List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase_: str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase_: Tuple = model(
            _lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
        """Check the token-classification head's logits shape."""
        UpperCamelCase_: str = self.num_labels
        UpperCamelCase_: Optional[Any] = MPNetForTokenClassification(_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        UpperCamelCase_: Optional[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _a ( self ):
        """Repackage prepare_config_and_inputs() as (config, forward-kwargs dict)."""
        UpperCamelCase_: int = self.prepare_config_and_inputs()
        ((UpperCamelCase_) ,(UpperCamelCase_) ,(UpperCamelCase_) ,(UpperCamelCase_) ,(UpperCamelCase_) ,(UpperCamelCase_)): int = config_and_inputs
        UpperCamelCase_: List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """Common model/pipeline test-suite entry points for the MPNet family.

    NOTE(review): several names here are unresolved after obfuscation —
    ``MPNetModelTester`` is not defined in this file (the tester class above was
    renamed), the tester methods invoked below (``create_and_check_mpnet_*``)
    are now all named ``_a``, and the mixin bases appear as the undefined
    ``__lowerCamelCase``.  Restore the original names before running.
    """

    # Model classes exercised by the common tests (empty when torch is absent).
    a : Optional[Any] =(
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline tests.
    a : Optional[Any] =(
        {
            """feature-extraction""": MPNetModel,
            """fill-mask""": MPNetForMaskedLM,
            """question-answering""": MPNetForQuestionAnswering,
            """text-classification""": MPNetForSequenceClassification,
            """token-classification""": MPNetForTokenClassification,
            """zero-shot""": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the common test mixin.
    a : Tuple =False
    a : List[str] =True
    def _a ( self ):
        """Create the model tester and a ConfigTester for MPNetConfig."""
        UpperCamelCase_: Optional[int] = MPNetModelTester(self )
        UpperCamelCase_: Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=3_7 )
    def _a ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def _a ( self ):
        """Shape-check the base MPNet model."""
        UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*_lowerCamelCase )
    def _a ( self ):
        """Shape-check the sequence-classification head."""
        UpperCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*_lowerCamelCase )
    def _a ( self ):
        """Shape-check the multiple-choice head."""
        UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*_lowerCamelCase )
    def _a ( self ):
        """Shape-check the token-classification head."""
        UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*_lowerCamelCase )
    def _a ( self ):
        """Shape-check the question-answering head."""
        UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*_lowerCamelCase )
@require_torch
class _lowerCAmelCase( unittest.TestCase ):
    """Slow integration test pinning a slice of microsoft/mpnet-base hidden states.

    NOTE(review): local names were reconstructed from their use sites — the
    obfuscated original assigned everything to ``UpperCamelCase_`` and then
    read undefined names (``model``, ``output``); a trailing dataset artifact
    (``| 57 |``) that broke the last line was removed.
    """

    @slow
    def _a ( self ):
        model = MPNetModel.from_pretrained('microsoft/mpnet-base' )
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __lowerCamelCase , unittest.TestCase ):
    """Tokenizer tests for OpenAIGPTTokenizer / OpenAIGPTTokenizerFast.

    Builds a tiny BPE vocab/merges fixture on disk, then checks full
    tokenization and that the fast tokenizer rejects padding without a
    pad token.

    NOTE(review): obfuscation assigns every local to ``lowercase`` while later
    lines read ``self.vocab_file`` / ``tokenizer`` / ``tokens`` /
    ``tokenizer_r`` — none of which are bound here; the original local and
    attribute names must be restored before these tests can run.
    """

    # Tokenizer classes under test and mixin feature flags.
    _UpperCamelCase : Tuple = OpenAIGPTTokenizer
    _UpperCamelCase : List[Any] = OpenAIGPTTokenizerFast
    _UpperCamelCase : int = True
    _UpperCamelCase : List[Any] = False
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Write a minimal BPE vocab and merges file into the temp test dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowercase = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        lowercase = dict(zip(snake_case , range(len(snake_case ) ) ) )
        lowercase = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(snake_case ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(snake_case ) )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        """Return a (input, expected) text pair used by the shared mixin tests."""
        return "lower newer", "lower newer"
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Tokenize 'lower' with the slow tokenizer and check tokens and ids."""
        lowercase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        lowercase = 'lower'
        lowercase = ['low', 'er</w>']
        lowercase = tokenizer.tokenize(snake_case )
        self.assertListEqual(snake_case , snake_case )
        lowercase = tokens + ['<unk>']
        lowercase = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case=15 ):
        """Padding must raise for the fast tokenizer since GPT has no pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowercase = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
                # Simple input
                lowercase = 'This is a simple input'
                lowercase = ['This is a simple input 1', 'This is a simple input 2']
                lowercase = ('This is a simple input', 'This is a pair')
                lowercase = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
                # Simple input
                self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
                # Simple input
                self.assertRaises(
                    snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
                # Pair input
                self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
                # Pair input
                self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
                # Pair input
                self.assertRaises(
                    snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Intentionally overridden to skip the mixin's pretokenized-input test."""
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class A_ ( __lowerCamelCase ):
    """Re-runs the inherited tokenizer tests with ftfy/spacy dependencies available.

    NOTE(review): the base class name ``__lowerCamelCase`` is not defined at
    module scope after obfuscation — presumably the test class above; confirm
    against the upstream file.
    """

    pass
| 84 | 0 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells):
    """Return the next Conway's Game of Life generation of `cells`.

    `cells` is a 2-D grid of 0/1 ints; cells outside the grid count as dead.
    The input grid is not modified.
    """
    next_generation = []
    for i in range(len(cells)):
        next_row = []
        for j in range(len(cells[i])):
            # Count the live neighbours of cell (i, j), clipping at the edges.
            neighbour_count = 0
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    if di == 0 and dj == 0:
                        continue
                    ni, nj = i + di, j + dj
                    if 0 <= ni < len(cells) and 0 <= nj < len(cells[ni]):
                        neighbour_count += cells[ni][nj]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die; all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_row.append(1)
            else:
                next_row.append(0)
        next_generation.append(next_row)
    return next_generation


def generate_images(cells, frames):
    """Run `frames` generations from `cells`, rendering each as a greyscale PIL image.

    Live cells are black, dead cells white.  Returns the list of images, one
    per generation (the first image shows the initial state).
    """
    images = []
    for _ in range(frames):
        # Create output image (width = number of columns, height = rows).
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image.
        for x in range(len(cells[0])):
            for y in range(len(cells)):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """Register the custom markers used across the transformers test suite.

    Called once at start-up with pytest's `config` object (the
    `pytest_configure` hook in the original conftest).
    """
    # BUG FIX: the body referenced the undefined name `config`; use the parameter.
    __SCREAMING_SNAKE_CASE.addinivalue_line(
        'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
    __SCREAMING_SNAKE_CASE.addinivalue_line(
        'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
    __SCREAMING_SNAKE_CASE.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
    __SCREAMING_SNAKE_CASE.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
    __SCREAMING_SNAKE_CASE.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
    __SCREAMING_SNAKE_CASE.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """Hand pytest's option parser to the shared transformers add-option hook."""
    # The import stays inside the function (as in the original), so transformers
    # is only pulled in when the hook actually runs.
    from transformers.testing_utils import pytest_addoption_shared as _pytest_addoption_shared

    _pytest_addoption_shared(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """Emit the extra transformers reports at terminal-summary time when requested.

    `__SCREAMING_SNAKE_CASE` is pytest's terminal reporter; reports are only
    produced when the run was invoked with `--make-reports`.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    # BUG FIX: the option value was stored in one local but tested and
    # forwarded under the undefined name `make_reports`; use one consistent name.
    make_reports = __SCREAMING_SNAKE_CASE.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(__SCREAMING_SNAKE_CASE , id=make_reports )
def UpperCAmelCase_ ( session , exitstatus ):
    """Map pytest's 'no tests collected' exit status (5) to success (0).

    BUG FIX: the original signature repeated a single parameter name (a
    SyntaxError) and assigned the result to a throwaway local; the standard
    conftest behaviour — resetting ``session.exitstatus`` — is restored.
    """
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
UpperCAmelCase = doctest.register_optionflag('''IGNORE_RESULT''')
UpperCAmelCase = doctest.OutputChecker
# NOTE(review): every module-level assignment in this section targets the same
# name `UpperCAmelCase`, so each rebinding clobbers the previous one, and the
# class body reads `IGNORE_RESULT` / `OutputChecker` (and the alias
# `CustomOutputChecker` below) which are never bound under those names here —
# the original distinct names must be restored.
class A_ ( __lowerCamelCase ):
    """doctest OutputChecker that treats any output as matching when IGNORE_RESULT is set."""
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
        """Return True unconditionally under IGNORE_RESULT, else defer to the base checker."""
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , snake_case , snake_case , snake_case )
# Install the custom checker/module/parser for the doctest runs.
UpperCAmelCase = CustomOutputChecker
UpperCAmelCase = HfDoctestModule
UpperCAmelCase = HfDocTestParser
| 84 | 0 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): every constant below is assigned to the same obfuscated name
# `lowerCAmelCase__`, so only the last assignment survives; the tokenizer class
# later references VOCAB_FILES_NAMES / PRETRAINED_* names that are never bound
# under those names here.
# SentencePiece underline marker used to denote word boundaries.
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = {'''vocab_file''': '''prophetnet.tokenizer'''}
# Checkpoint name -> hosted vocab-file URL.
lowerCAmelCase__ = {
    '''vocab_file''': {
        '''microsoft/xprophetnet-large-wiki100-cased''': (
            '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
        ),
    }
}
# Per-checkpoint tokenizer init kwargs.
lowerCAmelCase__ = {
    '''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
# Per-checkpoint maximum input length (positional-embedding size).
lowerCAmelCase__ = {
    '''microsoft/xprophetnet-large-wiki100-cased''': 512,
}
def a__ ( SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : Any = collections.OrderedDict()
with open(__SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as reader:
lowerCAmelCase : List[Any] = reader.readlines()
for index, token in enumerate(__SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[Any] = token.rstrip("\n" )
lowerCAmelCase : Optional[int] = index
return vocab
class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase ):
    """SentencePiece tokenizer with a fairseq-style vocabulary layout
    (XProphetNet-style).

    The underlying sentencepiece ids are remapped so that positions 0-4 hold
    [PAD]/[CLS]/[SEP]/[UNK]/[MASK], positions 5-14 hold ``[unused0-9]`` slots,
    and every real sentencepiece piece is shifted by ``self.fairseq_offset``
    (12).

    NOTE(review): this class is scrambled — every assignment target is the
    single name ``lowerCAmelCase`` (so attributes such as ``self.sp_model``,
    ``self.vocab_file``, ``self.fairseq_tokens_to_ids`` read by the methods
    are never actually bound), most methods share the one name ``lowercase__``
    (later defs shadow earlier ones), and signatures use duplicate
    ``snake_case__`` parameter names. The original descriptive names must be
    restored before this can run; the per-method docs below describe the
    apparent intent.
    """

    # Class-level metadata consumed by the PreTrainedTokenizer base class.
    a : Tuple =VOCAB_FILES_NAMES
    a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
    a : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a : Tuple =["""input_ids""", """attention_mask"""]

    def __init__( self , snake_case__ , snake_case__="[SEP]" , snake_case__="[SEP]" , snake_case__="[SEP]" , snake_case__="[UNK]" , snake_case__="[PAD]" , snake_case__="[CLS]" , snake_case__="[MASK]" , snake_case__ = None , **snake_case__ , ):
        """Load the sentencepiece model and build the fairseq-aligned vocab maps."""
        lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise
        lowerCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(snake_case__ ) )
        lowerCAmelCase : List[Any] = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # put special tokens and [unused] tokens into the vocab
        lowerCAmelCase : int = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10 ):
            lowerCAmelCase : Dict = f"""[unused{i}]"""
            lowerCAmelCase : Optional[int] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        lowerCAmelCase : List[Any] = 12
        lowerCAmelCase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(snake_case__ )

    def __getstate__( self ):
        """Pickle support: drop the unpicklable sentencepiece processor."""
        lowerCAmelCase : List[str] = self.__dict__.copy()
        # presumably sets state["sp_model"] = None — verify against original
        lowerCAmelCase : str = None
        return state

    def __setstate__( self , snake_case__ ):
        """Unpickle support: restore state and re-load the sentencepiece model."""
        lowerCAmelCase : Dict = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCAmelCase : Tuple = {}
        lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowercase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
        """Return a special-tokens mask (1 = special token) for one or two sequences."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
        # A single trailing [SEP] follows each sequence.
        if token_ids_a is None:
            return ([0] * len(snake_case__ )) + [1]
        return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]

    def lowercase__ ( self , snake_case__ , snake_case__ = None ):
        """Return token-type ids — all zeros for both sequences."""
        lowerCAmelCase : Optional[Any] = [self.sep_token_id]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0]
        return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def lowercase__ ( self ):
        """Vocabulary size: sentencepiece pieces plus the fairseq offset slots."""
        return len(self.sp_model ) + self.fairseq_offset

    def lowercase__ ( self ):
        """Return the full token -> id vocabulary, including added tokens."""
        lowerCAmelCase : Tuple = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def lowercase__ ( self , snake_case__ ):
        """Tokenize a string into sentencepiece pieces."""
        return self.sp_model.encode(snake_case__ , out_type=snake_case__ )

    def lowercase__ ( self , snake_case__ ):
        """Convert a token (str) to an id, honouring the fairseq remapping."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        lowerCAmelCase : Union[str, Any] = self.sp_model.PieceToId(snake_case__ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def lowercase__ ( self , snake_case__ ):
        """Convert an id back to its token (str)."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def lowercase__ ( self , snake_case__ ):
        """Join a list of pieces back into a plain string (underline -> space)."""
        lowerCAmelCase : int = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
        return out_string

    def lowercase__ ( self , snake_case__ , snake_case__ = None ):
        """Save the serialized sentencepiece vocabulary into a directory."""
        if not os.path.isdir(snake_case__ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase : Dict = os.path.join(
            snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , snake_case__ )
        elif not os.path.isfile(self.vocab_file ):
            # Fall back to dumping the in-memory model proto when the source
            # file is gone.
            with open(snake_case__ , "wb" ) as fi:
                lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
                fi.write(snake_case__ )
        return (out_vocab_file,)

    def lowercase__ ( self , snake_case__ , snake_case__ = None ):
        """Build model inputs by appending [SEP] after each sequence."""
        if token_ids_a is None:
            return token_ids_a + [self.sep_token_id]
        lowerCAmelCase : Tuple = [self.sep_token_id]
        return token_ids_a + sep + token_ids_a + sep
| 645 |
import torch
from torch import nn
class A_ ( nn.Module ):
    """Projected adaptive log-softmax (Transformer-XL style).

    Splits the vocabulary into a head ("shortlist") plus tail clusters given
    by ``cutoffs``; with ``div_val > 1`` each successive tail cluster uses a
    smaller embedding (``d_embed // div_val**i``) projected back to ``d_proj``.

    NOTE(review): assignment targets throughout were scrambled to the single
    name ``lowercase``, so attributes such as ``self.cutoffs``,
    ``self.out_layers``, ``self.out_projs``, ``self.cluster_weight`` that the
    methods read are never actually bound — the original presumably assigned
    those names; verify before relying on the documentation below.
    """

    def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case=1 , snake_case=False ):
        # Args (by position): n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False.
        super().__init__()
        lowercase = n_token
        lowercase = d_embed
        lowercase = d_proj
        # Append n_token so the last cluster covers the end of the vocabulary.
        lowercase = cutoffs + [n_token]
        lowercase = [0] + self.cutoffs
        lowercase = div_val
        lowercase = self.cutoffs[0]
        # One "routing" token per tail cluster is appended to the head softmax.
        lowercase = len(self.cutoffs ) - 1
        lowercase = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            lowercase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            lowercase = nn.Parameter(torch.zeros(self.n_clusters ) )
        lowercase = nn.ModuleList()
        lowercase = nn.ParameterList()
        if div_val == 1:
            # Same embedding size for every cluster; a projection is only
            # needed when d_proj != d_embed.
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
                else:
                    self.out_projs.append(snake_case )
                self.out_layers.append(nn.Linear(snake_case , snake_case ) )
        else:
            # Shrink the embedding size by div_val**i for each successive cluster.
            for i in range(len(self.cutoffs ) ):
                lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                lowercase = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
                self.out_layers.append(nn.Linear(snake_case , r_idx - l_idx ) )
        lowercase = keep_order

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
        """Compute logits: hidden (optionally projected through ``proj``) times
        ``weight.T`` plus ``bias``."""
        if proj is None:
            lowercase = nn.functional.linear(snake_case , snake_case , bias=snake_case )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            lowercase = nn.functional.linear(snake_case , proj.t().contiguous() )
            lowercase = nn.functional.linear(snake_case , snake_case , bias=snake_case )
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None , snake_case=False ):
        """Forward pass.

        With ``labels`` the language-modeling shift is applied and the
        per-token negative log-likelihood is returned; without labels the
        full-vocabulary log-probabilities are returned.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            lowercase = hidden[..., :-1, :].contiguous()
            lowercase = labels[..., 1:].contiguous()
            lowercase = hidden.view(-1 , hidden.size(-1 ) )
            lowercase = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            lowercase = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            # No tail clusters: a single (projected) softmax over the whole vocab.
            lowercase = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                # -100 is the ignore index; masked positions keep loss 0.
                lowercase = labels != -100
                lowercase = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
                lowercase = (
                    -nn.functional.log_softmax(snake_case , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                lowercase = nn.functional.log_softmax(snake_case , dim=-1 )
        else:
            # construct weights and biases
            lowercase , lowercase = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    lowercase = self.out_layers[0].weight[l_idx:r_idx]
                    lowercase = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    lowercase = self.out_layers[i].weight
                    lowercase = self.out_layers[i].bias
                if i == 0:
                    # The head cluster also scores the per-cluster routing tokens.
                    lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(snake_case )
                biases.append(snake_case )
            lowercase , lowercase , lowercase = weights[0], biases[0], self.out_projs[0]
            lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
            lowercase = nn.functional.log_softmax(snake_case , dim=1 )
            if labels is None:
                lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                lowercase = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
            lowercase = 0
            lowercase = [0] + self.cutoffs
            for i in range(len(snake_case ) - 1 ):
                lowercase , lowercase = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    # Select only the positions whose label falls in this cluster.
                    lowercase = (labels >= l_idx) & (labels < r_idx)
                    lowercase = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    lowercase = labels.index_select(0 , snake_case ) - l_idx
                    lowercase = head_logprob.index_select(0 , snake_case )
                    lowercase = hidden.index_select(0 , snake_case )
                else:
                    lowercase = hidden
                if i == 0:
                    if labels is not None:
                        lowercase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        lowercase = head_logprob[:, : self.cutoffs[0]]
                else:
                    lowercase , lowercase , lowercase = weights[i], biases[i], self.out_projs[i]
                    lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
                    lowercase = nn.functional.log_softmax(snake_case , dim=1 )
                    lowercase = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        # log P(token) = log P(cluster | head) + log P(token | cluster)
                        lowercase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        lowercase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        lowercase = logprob_i
                if labels is not None:
                    # Scatter the NLL back in original order when keep_order is
                    # requested, otherwise append sequentially.
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , snake_case , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out

    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        """Return full-vocabulary log-probabilities for ``hidden`` (no labels)."""
        if self.n_clusters == 0:
            lowercase = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(snake_case , dim=-1 )
        else:
            # construct weights and biases
            lowercase , lowercase = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    lowercase = self.out_layers[0].weight[l_idx:r_idx]
                    lowercase = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    lowercase = self.out_layers[i].weight
                    lowercase = self.out_layers[i].bias
                if i == 0:
                    lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(snake_case )
                biases.append(snake_case )
            lowercase , lowercase , lowercase = weights[0], biases[0], self.out_projs[0]
            lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
            lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            lowercase = nn.functional.log_softmax(snake_case , dim=1 )
            lowercase = [0] + self.cutoffs
            for i in range(len(snake_case ) - 1 ):
                lowercase , lowercase = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    lowercase = head_logprob[:, : self.cutoffs[0]]
                else:
                    lowercase , lowercase , lowercase = weights[i], biases[i], self.out_projs[i]
                    lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
                    lowercase = nn.functional.log_softmax(snake_case , dim=1 )
                    # Combine head routing prob with within-cluster prob.
                    lowercase = head_logprob[:, -i] + tail_logprob_i
                    lowercase = logprob_i
            return out
| 84 | 0 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ):
    """Convert a T5X (t5 / longt5) checkpoint into a Flax `transformers` model.

    The three positional arguments are (by usage in ``__main__`` below): the
    T5X checkpoint path, the config name, and the output folder for the Flax
    dump.

    NOTE(review): this body is heavily scrambled — the three parameters share
    one duplicate name while the body reads the undefined
    ``__SCREAMING_SNAKE_CASE``; most assignment targets were collapsed to
    ``UpperCamelCase`` (so the ``tax_*`` names read later are never bound);
    and ``txa_*`` names (``txa_mlp_layer_norm``, ``txa_token_embeddings``,
    ``txa_decoder_norm``) are read where only ``tax_*`` spellings appear.
    The original variable names must be restored before this can run.
    """
    UpperCamelCase :List[str] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
    UpperCamelCase :str = FlaxAutoModelForSeqaSeqLM.from_config(config=__SCREAMING_SNAKE_CASE )
    UpperCamelCase :Any = checkpoints.load_tax_checkpoint(__SCREAMING_SNAKE_CASE )
    # T5 v1.1+/LongT5 checkpoints split the MLP input projection into wi_0/wi_1.
    UpperCamelCase :Union[str, Any] = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
    if config.model_type == "t5":
        UpperCamelCase :Tuple = '''SelfAttention'''
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        UpperCamelCase :Any = '''LocalSelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        UpperCamelCase :Union[str, Any] = '''TransientGlobalSelfAttention'''
    else:
        raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global].''' )
    # Encoder
    for layer_index in range(config.num_layers ):
        UpperCamelCase :Union[str, Any] = F'''layers_{str(__SCREAMING_SNAKE_CASE )}'''
        # Self-Attention
        UpperCamelCase :str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
        UpperCamelCase :int = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
        UpperCamelCase :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
        UpperCamelCase :Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            UpperCamelCase :List[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
        # Layer Normalization
        UpperCamelCase :Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
        if split_mlp_wi:
            UpperCamelCase :str = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            UpperCamelCase :Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            UpperCamelCase :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        UpperCamelCase :Any = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        UpperCamelCase :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning
        UpperCamelCase :Union[str, Any] = flax_model.params['''encoder''']['''block'''][str(__SCREAMING_SNAKE_CASE )]['''layer''']
        UpperCamelCase :Optional[Any] = tax_attention_key
        UpperCamelCase :int = tax_attention_out
        UpperCamelCase :Optional[int] = tax_attention_query
        UpperCamelCase :Dict = tax_attention_value
        UpperCamelCase :List[str] = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            UpperCamelCase :Optional[Any] = tax_global_layer_norm
        if split_mlp_wi:
            UpperCamelCase :int = tax_mlp_wi_a
            UpperCamelCase :int = tax_mlp_wi_a
        else:
            UpperCamelCase :Any = tax_mlp_wi
        UpperCamelCase :Dict = tax_mlp_wo
        UpperCamelCase :Dict = tax_mlp_layer_norm
        UpperCamelCase :Union[str, Any] = flax_model_encoder_layer_block
    # Only for layer 0:
    UpperCamelCase :List[str] = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
    UpperCamelCase :int = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        UpperCamelCase :Dict = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
        UpperCamelCase :str = tax_encoder_global_rel_embedding
    # Assigning
    UpperCamelCase :Any = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
    UpperCamelCase :Optional[Any] = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        UpperCamelCase :Dict = F'''layers_{str(__SCREAMING_SNAKE_CASE )}'''
        # Self-Attention
        UpperCamelCase :str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
        UpperCamelCase :Any = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
        UpperCamelCase :Tuple = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
        UpperCamelCase :str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
        # Layer Normalization
        UpperCamelCase :Dict = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
            '''scale'''
        ]
        # Encoder-Decoder-Attention
        UpperCamelCase :Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
        UpperCamelCase :str = tax_enc_dec_attention_module['''key''']['''kernel''']
        UpperCamelCase :str = tax_enc_dec_attention_module['''out''']['''kernel''']
        UpperCamelCase :Optional[int] = tax_enc_dec_attention_module['''query''']['''kernel''']
        UpperCamelCase :List[str] = tax_enc_dec_attention_module['''value''']['''kernel''']
        # Layer Normalization
        UpperCamelCase :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
        # MLP
        if split_mlp_wi:
            UpperCamelCase :List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            UpperCamelCase :List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            UpperCamelCase :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        UpperCamelCase :List[str] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        UpperCamelCase :Tuple = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning
        UpperCamelCase :Tuple = flax_model.params['''decoder''']['''block'''][str(__SCREAMING_SNAKE_CASE )]['''layer''']
        UpperCamelCase :Optional[Any] = tax_attention_key
        UpperCamelCase :int = tax_attention_out
        UpperCamelCase :Optional[Any] = tax_attention_query
        UpperCamelCase :Optional[int] = tax_attention_value
        UpperCamelCase :str = tax_pre_attention_layer_norm
        UpperCamelCase :List[Any] = tax_enc_dec_attention_key
        UpperCamelCase :Union[str, Any] = tax_enc_dec_attention_out
        UpperCamelCase :int = tax_enc_dec_attention_query
        UpperCamelCase :Dict = tax_enc_dec_attention_value
        UpperCamelCase :Any = tax_cross_layer_norm
        if split_mlp_wi:
            UpperCamelCase :Any = tax_mlp_wi_a
            UpperCamelCase :Tuple = tax_mlp_wi_a
        else:
            UpperCamelCase :str = tax_mlp_wi
        UpperCamelCase :List[Any] = tax_mlp_wo
        UpperCamelCase :List[Any] = txa_mlp_layer_norm
        UpperCamelCase :int = flax_model_decoder_layer_block
    # Decoder Normalization
    UpperCamelCase :str = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    UpperCamelCase :List[Any] = txa_decoder_norm
    # Only for layer 0:
    UpperCamelCase :int = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
    UpperCamelCase :List[Any] = tax_decoder_rel_embedding
    # Token Embeddings
    UpperCamelCase :Union[str, Any] = tax_model['''target''']['''token_embedder''']['''embedding''']
    UpperCamelCase :Optional[Any] = txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        UpperCamelCase :Union[str, Any] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(__SCREAMING_SNAKE_CASE )
    print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
    # Bug fix: the parser and parsed-args objects were previously assigned to
    # `__snake_case`, leaving `parser` and `args` undefined, and the undefined
    # name `convert_tax_checkpoint_to_flax` was called. Bind real names and
    # dispatch to the conversion function defined above (`_A`). The argument
    # attribute is `t5x_checkpoint_path` (matching the flag), not
    # `tax_checkpoint_path`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    _A(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 658 |
from __future__ import annotations
class A_ :
    """A 2-D matrix of ints/floats with standard linear-algebra operations:
    determinant via cofactor expansion, minors/cofactors/adjugate/inverse,
    element-wise arithmetic, matrix product, integer powers, and row/column
    insertion.

    NOTE(review): this class is scrambled — every method/property is named
    ``SCREAMING_SNAKE_CASE__`` (later definitions shadow earlier ones), every
    assignment target is ``lowercase``, and the bodies construct ``Matrix``
    although the class itself is named ``A_``. The bodies reference the
    descriptive names the original presumably used (``columns``, ``num_rows``,
    ``num_columns``, ``order``, ``is_square``, ``identity``, ``determinant``,
    ``is_invertable``, ``get_minor``, ``get_cofactor``, ``minors``,
    ``cofactors``, ``adjugate``, ``inverse``, ``add_row``, ``add_column``,
    ``dot_product``). Verify before use.
    """

    def __init__( self , snake_case ):
        """Validate and store the row data: zero or more equal-length,
        non-empty lists of ints/floats. An empty argument yields the empty
        matrix."""
        lowercase = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(snake_case ) != 0:
            lowercase = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(snake_case ) != cols:
                    raise error
                for value in row:
                    if not isinstance(snake_case , (int, float) ):
                        raise error
            lowercase = rows
        else:
            lowercase = []

    def SCREAMING_SNAKE_CASE__ ( self ):
        # columns(): the transpose as a list of column lists.
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # num_rows
        return len(self.rows )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # num_columns
        return len(self.rows[0] )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # order: (num_rows, num_columns)
        return (self.num_rows, self.num_columns)

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # is_square
        return self.order[0] == self.order[1]

    def SCREAMING_SNAKE_CASE__ ( self ):
        # identity(): identity matrix of the same order.
        lowercase = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # determinant(): Laplace (cofactor) expansion along the first row.
        # Returns 0 for non-square matrices, 1 for the empty matrix.
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # is_invertable(): true iff the determinant is non-zero.
        return bool(self.determinant() )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # get_minor(row, column): determinant of the submatrix obtained by
        # deleting the given row and column.
        lowercase = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(snake_case ).determinant()

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # get_cofactor(row, column): minor with the checkerboard sign applied.
        if (row + column) % 2 == 0:
            return self.get_minor(snake_case , snake_case )
        return -1 * self.get_minor(snake_case , snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # minors(): matrix of minors.
        return Matrix(
            [
                [self.get_minor(snake_case , snake_case ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # cofactors(): matrix of minors with alternating signs applied.
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # adjugate(): transpose of the cofactor matrix.
        lowercase = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(snake_case )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # inverse(): adjugate scaled by 1/determinant. Raises TypeError for a
        # singular matrix (kept as-is for backward compatibility).
        lowercase = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse' )
        return self.adjugate() * (1 / determinant)

    def __repr__( self ):
        return str(self.rows )

    def __str__( self ):
        # Bracketed, dot-separated textual rendering of the rows.
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(snake_case ) for value in row] ) + '.]'
                    for row in self.rows
                ] )
            + "]"
        )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
        # add_row(row, position=None): validate the new row and append it, or
        # insert it at the given position.
        lowercase = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(snake_case , snake_case ):
            raise type_error
        for value in row:
            if not isinstance(snake_case , (int, float) ):
                raise type_error
        if len(snake_case ) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(snake_case )
        else:
            lowercase = self.rows[0:position] + [row] + self.rows[position:]

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
        # add_column(column, position=None): validate the new column and
        # append it, or insert it at the given position.
        lowercase = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(snake_case , snake_case ):
            raise type_error
        for value in column:
            if not isinstance(snake_case , (int, float) ):
                raise type_error
        if len(snake_case ) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            lowercase = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            lowercase = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]

    def __eq__( self , snake_case ):
        # Equality compares the row data; NotImplemented for non-matrix operands.
        if not isinstance(snake_case , snake_case ):
            return NotImplemented
        return self.rows == other.rows

    def __ne__( self , snake_case ):
        return not self == other

    def __neg__( self ):
        return self * -1

    def __add__( self , snake_case ):
        # Element-wise addition; both matrices must share the same order.
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )

    def __sub__( self , snake_case ):
        # Element-wise subtraction; both matrices must share the same order.
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )

    def __mul__( self , snake_case ):
        # Scalar multiplication (note: result is truncated to int) or matrix
        # product via dot products of rows with columns.
        if isinstance(snake_case , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(snake_case , snake_case ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(snake_case , snake_case ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix' )

    def __pow__( self , snake_case ):
        # Integer powers: 0 -> identity; negative powers use the inverse
        # (square, invertible matrices only); positive powers by repeated
        # multiplication.
        if not isinstance(snake_case , snake_case ):
            raise TypeError('A Matrix can only be raised to the power of an int' )
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power' )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power' )
        lowercase = self
        for _ in range(other - 1 ):
            result *= self
        return result

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls , snake_case , snake_case ):
        # dot_product(row, column): sum of pairwise products.
        return sum(row[i] * column[i] for i in range(len(snake_case ) ) )
if __name__ == "__main__":
    # Run the module's embedded doctests when executed directly.
    import doctest
    doctest.testmod()
| 84 | 0 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 375 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# NOTE(review): both constants below are bound to the same scrambled name
# `UpperCAmelCase`, so the logger is immediately overwritten by the example
# string — presumably `logger` and `EXAMPLE_DOC_STRING` originally; verify.
UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
# Usage example injected into the pipeline's __call__ docstring via
# @replace_example_docstring.
UpperCAmelCase = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def UpperCAmelCase_ ( height , width , scale_factor=8 ):
    """Round spatial dimensions up to the nearest multiple of scale_factor**2,
    then divide by scale_factor.

    Used to compute latent height/width compatible with the MoVQ decoder:
    each returned dimension is ``ceil(dim / scale_factor**2) * scale_factor``.

    Args:
        height: Requested image height in pixels.
        width: Requested image width in pixels.
        scale_factor: Spatial downscaling factor of the decoder (default 8).

    Returns:
        Tuple ``(new_height, new_width)`` of the adjusted dimensions.
    """
    # Bug fix: the original signature declared three duplicate parameter names
    # (a SyntaxError) and the body read the never-assigned `new_height` /
    # `new_width`; restore the intended names.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class A_ ( __lowerCamelCase ):
'''simple docstring'''
    def __init__( self , snake_case , snake_case , snake_case , ):
        """Register the unet, scheduler and movq components on the pipeline.

        NOTE(review): the three positional parameters share the scrambled name
        ``snake_case`` (duplicate argument names), and the final assignment
        target was scrambled to ``lowercase`` — presumably
        ``self.movq_scale_factor`` (2 ** number of MoVQ downsampling steps);
        verify against the original.
        """
        super().__init__()
        self.register_modules(
            unet=snake_case , scheduler=snake_case , movq=snake_case , )
        lowercase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
        """Create (or validate) the initial latents and scale them by the
        scheduler's ``init_noise_sigma``.

        NOTE(review): parameter names are scrambled duplicates; the body reads
        ``latents``, ``shape`` and ``scheduler``, which the original signature
        presumably declared (shape, dtype, device, generator, latents,
        scheduler) — verify.
        """
        if latents is None:
            # No latents supplied: sample fresh Gaussian noise.
            lowercase = randn_tensor(snake_case , generator=snake_case , device=snake_case , dtype=snake_case )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            lowercase = latents.to(snake_case )
        lowercase = latents * scheduler.init_noise_sigma
        return latents
    def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 ):
        """Offload submodels to CPU with accelerate's ``cpu_offload``: weights
        are moved to GPU ``cuda:{gpu_id}`` only while each module executes,
        trading speed for lower memory use.

        NOTE(review): ``gpu_id`` and ``models`` read below come from scrambled
        assignment targets — verify against the original.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        lowercase = torch.device(F'''cuda:{gpu_id}''' )
        lowercase = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(snake_case , snake_case )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 ):
        """Offload submodels to CPU with hooks (``cpu_offload_with_hook``):
        each model is moved to GPU just before its forward pass and offloaded
        again when the next model runs. Requires accelerate >= 0.17.0.

        NOTE(review): ``gpu_id`` and ``hook`` read below come from scrambled
        assignment targets — verify against the original.
        """
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        lowercase = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=snake_case )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        lowercase = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            lowercase , lowercase = cpu_offload_with_hook(snake_case , snake_case , prev_module_hook=snake_case )
        # We'll offload the last model manually.
        lowercase = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Return the device the unet actually executes on.

        When accelerate offload hooks are installed, the hook's
        ``execution_device`` takes precedence over ``self.device``.
        """
        if not hasattr(self.unet , '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(snake_case , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
@replace_example_docstring(snake_case )
def __call__( self , snake_case , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 100 , snake_case = 4.0 , snake_case = 1 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , ):
    # Main text/image-conditioned generation loop of the pipeline.
    # NOTE(review): the obfuscation renamed every parameter and local to the
    # same identifiers (`snake_case` / `lowercase`), which is a SyntaxError and
    # loses the real bindings.  From the body's usage the intended parameters
    # appear to be: image_embeds, negative_image_embeds, height=512, width=512,
    # num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1,
    # generator=None, latents=None, output_type="pil", return_dict=True —
    # TODO confirm against the un-obfuscated pipeline source.
    lowercase = self._execution_device
    # Classifier-free guidance is active whenever guidance_scale > 1.
    lowercase = guidance_scale > 1.0
    # Lists of embeddings are concatenated into a single batch tensor.
    if isinstance(snake_case , snake_case ):
        lowercase = torch.cat(snake_case , dim=0 )
    lowercase = image_embeds.shape[0] * num_images_per_prompt
    if isinstance(snake_case , snake_case ):
        lowercase = torch.cat(snake_case , dim=0 )
    if do_classifier_free_guidance:
        # Repeat embeddings per requested image, then stack [uncond, cond]
        # so one unet call serves both guidance branches.
        lowercase = image_embeds.repeat_interleave(snake_case , dim=0 )
        lowercase = negative_image_embeds.repeat_interleave(snake_case , dim=0 )
        lowercase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case )
    self.scheduler.set_timesteps(snake_case , device=snake_case )
    lowercase = self.scheduler.timesteps
    lowercase = self.unet.config.in_channels
    # Latent spatial size is the requested size scaled down by the movq factor.
    lowercase , lowercase = downscale_height_and_width(snake_case , snake_case , self.movq_scale_factor )
    # create initial latent
    lowercase = self.prepare_latents(
        (batch_size, num_channels_latents, height, width) , image_embeds.dtype , snake_case , snake_case , snake_case , self.scheduler , )
    for i, t in enumerate(self.progress_bar(snake_case ) ):
        # expand the latents if we are doing classifier free guidance
        lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
        lowercase = {'image_embeds': image_embeds}
        lowercase = self.unet(
            sample=snake_case , timestep=snake_case , encoder_hidden_states=snake_case , added_cond_kwargs=snake_case , return_dict=snake_case , )[0]
        if do_classifier_free_guidance:
            # The unet output carries noise and (optionally) variance channels
            # concatenated on dim 1: mix the noise halves with the guidance
            # scale, and keep the variance from the conditional branch.
            lowercase , lowercase = noise_pred.split(latents.shape[1] , dim=1 )
            lowercase , lowercase = noise_pred.chunk(2 )
            lowercase , lowercase = variance_pred.chunk(2 )
            lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            lowercase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
        if not (
            hasattr(self.scheduler.config , 'variance_type' )
            and self.scheduler.config.variance_type in ["learned", "learned_range"]
        ):
            # Scheduler does not consume learned variance: drop those channels.
            lowercase , lowercase = noise_pred.split(latents.shape[1] , dim=1 )
        # compute the previous noisy sample x_t -> x_t-1
        lowercase = self.scheduler.step(
            snake_case , snake_case , snake_case , generator=snake_case , )[0]
    # post-processing
    # Decode the final latents back to image space with the movq decoder.
    lowercase = self.movq.decode(snake_case , force_not_quantize=snake_case )['sample']
    if output_type not in ["pt", "np", "pil"]:
        raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
    if output_type in ["np", "pil"]:
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL output.
        lowercase = image * 0.5 + 0.5
        lowercase = image.clamp(0 , 1 )
        lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    if output_type == "pil":
        lowercase = self.numpy_to_pil(snake_case )
    if not return_dict:
        return (image,)
    return ImagePipelineOutput(images=snake_case )
| 84 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
    """Builds small ViTMAE configs and random inputs, and runs shape checks,
    for the model tests in this file.

    NOTE(review): the obfuscated source declared every ``__init__`` parameter
    with the same name and gave every method the same name (both losing code
    to shadowing, the former a SyntaxError).  Parameter and method names are
    restored from the attribute assignments and the call sites in the bodies.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # num_labels is accepted for API parity with sibling testers but unused
        # in the original body, so it is deliberately not stored.
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )

    def prepare_config_and_inputs ( self ):
        """Return (config, pixel_values, labels) with random pixel values."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config ( self ):
        """Build a small ViTMAEConfig from the tester's hyper-parameters."""
        # is_decoder was an undefined obfuscated name in the source; False is
        # the conventional value for an encoder-only config — TODO confirm.
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )

    def create_and_check_model ( self , config , pixel_values , labels ):
        """Forward ViTMAEModel and check the hidden-state shape."""
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_pretraining ( self , config , pixel_values , labels ):
        """Forward ViTMAEForPreTraining (RGB then greyscale) and check logits shapes."""
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )

    def prepare_config_and_inputs_for_common ( self ):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) protocol."""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __lowerCamelCase ,__lowerCamelCase ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowercase__ = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = ViTMAEModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def lowercase__ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowercase__ ( self : int ):
'''simple docstring'''
pass
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase, nn.Linear ) )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
# make masks reproducible
np.random.seed(2 )
lowercase__ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = torch.from_numpy(lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = pt_noise
super().check_pt_tf_models(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs[0].cpu().numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
lowercase__ = model_class.from_pretrained(lowerCamelCase )
model.to(lowerCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
# Make sure we don't have nans
lowercase__ = after_outputs[0].cpu().numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase, 1E-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.''' )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.''' )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Any ):
'''simple docstring'''
pass
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTMAEModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def a ( ):
    """Load and return the standard COCO fixture image used by the integration tests."""
    # Fixed: the original stored the image in one (obfuscated) name but
    # returned the undefined name `image`, raising NameError.
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests against the pretrained facebook/vit-mae-base checkpoint.

    NOTE(review): in the obfuscated source both members were named
    ``lowercase__`` (the property was shadowed by the test method) and the
    body referenced undefined names; member names and locals are restored
    from the bodies' usage.
    """

    @cached_property
    def default_image_processor ( self ):
        # Lazily build the processor; None when the vision extras are unavailable.
        return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None

    @slow
    def test_inference_for_pretraining ( self ):
        """Forward a fixture image through the model and compare logits to reference values."""
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(torch_device )
        image_processor = self.default_image_processor
        # `a` is this module's image-fixture loader (originally `prepare_img`).
        image = a()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1E-4 ) )
| 183 |
def UpperCAmelCase_ ( number , digit_amount ):
    """Return the fractional part of `number`.

    When `digit_amount` > 0 the fractional part is rounded to that many
    decimal places; otherwise the raw floating-point difference is returned.
    `int()` truncates toward zero, so this also works for negative numbers.
    """
    # Fixed: the obfuscated signature declared both parameters with the same
    # name (a SyntaxError); names restored from the demo calls below.
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
    # Demonstrate fraction isolation for positive, negative and zero inputs.
    # Fixed: the original called `decimal_isolate`, which does not exist in
    # this module — the function above is (obfuscated as) `UpperCAmelCase_`.
    print(UpperCAmelCase_(1.53, 0))
    print(UpperCAmelCase_(35.345, 1))
    print(UpperCAmelCase_(35.345, 2))
    print(UpperCAmelCase_(35.345, 3))
    print(UpperCAmelCase_(-14.789, 3))
    print(UpperCAmelCase_(0, 2))
    print(UpperCAmelCase_(-14.123, 1))
    print(UpperCAmelCase_(-14.123, 2))
    print(UpperCAmelCase_(-14.123, 3))
| 84 | 0 |
'''simple docstring'''
from math import factorial
def _UpperCAmelCase ( __A : str = 1_00 ):
return sum(map(__SCREAMING_SNAKE_CASE , str(factorial(__SCREAMING_SNAKE_CASE ) ) ) )
if __name__ == "__main__":
    # Fixed: the original called `solution`, which does not exist here —
    # the solver above is (obfuscated as) `_UpperCAmelCase`.
    print(_UpperCAmelCase(int(input('Enter the Number: ').strip())))
| 466 |
from __future__ import annotations
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """Return True iff the decimal representation of the argument is a palindrome.

    Fixed: the original stored str(...) in one obfuscated name but compared
    the undefined name `n`, raising NameError.
    """
    digits = str(__SCREAMING_SNAKE_CASE )
    return digits == digits[::-1]
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = 100_0000 ):
    """Project Euler problem 36: sum of numbers below the limit that are
    palindromic in both base 10 and base 2.

    Fixed: the original called an undefined helper (`is_palindrome`) and
    accumulated into an undefined name (`total`).  The palindrome checks are
    inlined so this function is self-contained — the module-level helper
    shares this function's obfuscated name and would be shadowed anyway.
    """
    total = 0
    for candidate in range(1 , __SCREAMING_SNAKE_CASE ):
        decimal_digits = str(candidate )
        binary_digits = bin(candidate )[2:]  # strip the '0b' prefix
        if decimal_digits == decimal_digits[::-1] and binary_digits == binary_digits[::-1]:
            total += candidate
    return total
if __name__ == "__main__":
    # Fixed: the original called `solution`, which does not exist here — the
    # solver above is (obfuscated as) `UpperCAmelCase_`.  The redundant
    # str() around input() was also dropped (input() already returns str).
    print(UpperCAmelCase_(int(input().strip())))
| 84 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_UpperCamelCase : List[str] = sys.version_info >= (3, 10)
def __UpperCAmelCase ( A : Tuple=None , A : Any=None ) -> List[Any]:
return field(default_factory=lambda: default , metadata=__SCREAMING_SNAKE_CASE )
@dataclass
class snake_case__ :
a_ = 42
a_ = 42
a_ = 42
a_ = 42
@dataclass
class snake_case__ :
a_ = 42
a_ = field(default="toto" , metadata={"help": "help message"})
@dataclass
class snake_case__ :
a_ = False
a_ = True
a_ = None
class snake_case__ ( __lowerCamelCase):
a_ = """titi"""
a_ = """toto"""
class snake_case__ ( __lowerCamelCase):
a_ = """titi"""
a_ = """toto"""
a_ = 42
@dataclass
class snake_case__ :
a_ = "toto"
def A ( self : List[Any] ) -> Dict:
UpperCAmelCase_ : str = BasicEnum(self.foo )
@dataclass
class snake_case__ :
a_ = "toto"
def A ( self : Dict ) -> Any:
UpperCAmelCase_ : Optional[int] = MixedTypeEnum(self.foo )
@dataclass
class snake_case__ :
a_ = None
a_ = field(default=__lowerCamelCase , metadata={"help": "help message"})
a_ = None
a_ = list_field(default=[])
a_ = list_field(default=[])
@dataclass
class snake_case__ :
a_ = list_field(default=[])
a_ = list_field(default=[1, 2, 3])
a_ = list_field(default=["Hallo", "Bonjour", "Hello"])
a_ = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class snake_case__ :
a_ = field()
a_ = field()
a_ = field()
def A ( self : str ) -> str:
UpperCAmelCase_ : Dict = BasicEnum(self.required_enum )
@dataclass
class snake_case__ :
a_ = 42
a_ = field()
a_ = None
a_ = field(default="toto" , metadata={"help": "help message"})
a_ = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
@dataclass
class snake_case__ :
a_ = False
a_ = True
a_ = None
@dataclass
class snake_case__ :
a_ = None
a_ = field(default=__lowerCamelCase , metadata={"help": "help message"})
a_ = None
a_ = list_field(default=[])
a_ = list_field(default=[])
class snake_case__ ( unittest.TestCase):
def A ( self : Union[str, Any] , _A : Dict , _A : int ) -> List[str]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
UpperCAmelCase_ : Dict = {k: v for k, v in vars(_A ).items() if k != '''container'''}
UpperCAmelCase_ : List[Any] = {k: v for k, v in vars(_A ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _A ) and yy.get('''choices''' , _A ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_A ) , yy['''type'''](_A ) )
del xx["type"], yy["type"]
self.assertEqual(_A , _A )
def A ( self : Dict ) -> str:
UpperCAmelCase_ : str = HfArgumentParser(_A )
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_A , required=_A )
expected.add_argument('''--bar''' , type=_A , required=_A )
expected.add_argument('''--baz''' , type=_A , required=_A )
expected.add_argument('''--flag''' , type=_A , default=_A , const=_A , nargs='''?''' )
self.argparsersEqual(_A , _A )
UpperCAmelCase_ : Optional[int] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((UpperCAmelCase_ ) , ) : int = parser.parse_args_into_dataclasses(_A , look_for_args_file=_A )
self.assertFalse(example.flag )
def A ( self : Tuple ) -> str:
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_A )
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_A )
expected.add_argument('''--baz''' , default='''toto''' , type=_A , help='''help message''' )
self.argparsersEqual(_A , _A )
def A ( self : List[str] ) -> Any:
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_A , default=_A , const=_A , nargs='''?''' )
expected.add_argument('''--baz''' , type=_A , default=_A , const=_A , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_A , dest='''baz''' )
expected.add_argument('''--opt''' , type=_A , default=_A )
UpperCAmelCase_ : Tuple = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_A )
for dataclass_type in dataclass_types:
UpperCAmelCase_ : Optional[int] = HfArgumentParser(_A )
self.argparsersEqual(_A , _A )
UpperCAmelCase_ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) )
UpperCAmelCase_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) )
UpperCAmelCase_ : Dict = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) )
UpperCAmelCase_ : Dict = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) )
UpperCAmelCase_ : Optional[int] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) )
def A ( self : Tuple ) -> Any:
UpperCAmelCase_ : Union[str, Any] = HfArgumentParser(_A )
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_A , _A )
UpperCAmelCase_ : int = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
UpperCAmelCase_ : str = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCAmelCase_ : Union[str, Any] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
UpperCAmelCase_ : str = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCAmelCase_ : Optional[int] = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
UpperCAmelCase_ : Any = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def A ( self : List[str] ) -> Optional[Any]:
@dataclass
class snake_case__ :
a_ = "toto"
UpperCAmelCase_ : int = HfArgumentParser(_A )
UpperCAmelCase_ : str = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_A , _A )
UpperCAmelCase_ : Any = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
UpperCAmelCase_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
UpperCAmelCase_ : int = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def A ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_A )
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_A )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_A )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_A )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_A )
self.argparsersEqual(_A , _A )
UpperCAmelCase_ : str = parser.parse_args([] )
self.assertEqual(
_A , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCAmelCase_ : List[Any] = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_A , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def A ( self : Dict ) -> Dict:
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_A , type=_A )
expected.add_argument('''--bar''' , default=_A , type=_A , help='''help message''' )
expected.add_argument('''--baz''' , default=_A , type=_A )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_A )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_A )
UpperCAmelCase_ : Optional[int] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_A )
for dataclass_type in dataclass_types:
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_A )
self.argparsersEqual(_A , _A )
UpperCAmelCase_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(_A , Namespace(foo=_A , bar=_A , baz=_A , ces=[] , des=[] ) )
UpperCAmelCase_ : str = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_A , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def A ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Dict = HfArgumentParser(_A )
UpperCAmelCase_ : Any = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_A , required=_A )
expected.add_argument('''--required_str''' , type=_A , required=_A )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_A , )
self.argparsersEqual(_A , _A )
def A ( self : str ) -> Any:
UpperCAmelCase_ : str = HfArgumentParser(_A )
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_A , required=_A )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_A , )
expected.add_argument('''--opt''' , type=_A , default=_A )
expected.add_argument('''--baz''' , default='''toto''' , type=_A , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_A )
self.argparsersEqual(_A , _A )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_A )
UpperCAmelCase_ : int = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
UpperCAmelCase_ : Optional[int] = parser.parse_dict(_A )[0]
UpperCAmelCase_ : Tuple = BasicExample(**_A )
self.assertEqual(_A , _A )
def A ( self : int ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_A )
UpperCAmelCase_ : Optional[int] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_A , parser.parse_dict , _A , allow_extra_keys=_A )
def A ( self : Any ) -> int:
UpperCAmelCase_ : Optional[int] = HfArgumentParser(_A )
UpperCAmelCase_ : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(_A , '''temp_json''' )
os.mkdir(_A )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_A , _A )
UpperCAmelCase_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
UpperCAmelCase_ : int = BasicExample(**_A )
self.assertEqual(_A , _A )
def A ( self : Tuple ) -> Tuple:
UpperCAmelCase_ : Dict = HfArgumentParser(_A )
UpperCAmelCase_ : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Tuple = os.path.join(_A , '''temp_yaml''' )
os.mkdir(_A )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_A , _A )
UpperCAmelCase_ : str = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
UpperCAmelCase_ : Dict = BasicExample(**_A )
self.assertEqual(_A , _A )
def A ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Tuple = HfArgumentParser(_A )
self.assertIsNotNone(_A )
| 541 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class A_ ( PretrainedConfig ):
    '''Configuration class for the Conditional DETR object-detection model.

    NOTE(review): the obfuscated source used the undefined base
    ``__lowerCamelCase`` (restored to the imported ``PretrainedConfig``),
    declared every ``__init__`` parameter as ``snake_case`` (duplicate
    parameters are a SyntaxError), and collapsed the class attributes and
    property names so only the last of each survived.  Names are restored
    from the attribute assignments in the body and the standard
    PretrainedConfig conventions.
    '''

    model_type = """conditional_detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # A timm backbone and an explicit backbone_config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                # Rehydrate a plain dict into the matching backbone config class.
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads ( self ):
        """Alias required by the `attribute_map` above."""
        return self.encoder_attention_heads

    @property
    def hidden_size ( self ):
        """Alias required by the `attribute_map` above."""
        return self.d_model

    def to_dict ( self ):
        """Serialize to a plain dict, inlining backbone_config and adding model_type."""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class A_ ( OnnxConfig ):
    '''ONNX export configuration for Conditional DETR.

    NOTE(review): the obfuscated source used the undefined base
    ``__lowerCamelCase`` (restored to the imported ``OnnxConfig``) and gave
    all three properties the same name, so only the last survived; the
    standard OnnxConfig member names are restored.
    '''

    # Minimum torch version supporting the export of this architecture.
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs ( self ):
        """Dynamic-axis specification for the exported graph inputs."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )

    @property
    def atol_for_validation ( self ):
        """Absolute tolerance when validating ONNX vs PyTorch outputs."""
        return 1E-5

    @property
    def default_onnx_opset ( self ):
        """Default ONNX opset used for the export."""
        return 12
| 84 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class _a :
    """Builds a LayoutLM config plus a synthetic batch (token ids, legal
    bounding boxes, masks, labels) and runs output-shape checks for each TF
    LayoutLM head (standard "model tester" pattern).

    NOTE(review): identifiers look machine-mangled — ``__init__`` repeats the
    parameter name ``_UpperCAmelCase`` (a SyntaxError in Python) and every
    assignment targets the placeholder ``UpperCamelCase_`` while later code
    reads the real names (``self.batch_size``, ``bbox``, ``config`` ...).
    Restore the original identifiers before this class can run.
    """

    def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=1000 , ) -> Union[str, Any]:
        # Record the sizing/hyper-parameters used to build config and inputs.
        UpperCamelCase_ = parent
        UpperCamelCase_ = batch_size
        UpperCamelCase_ = seq_length
        UpperCamelCase_ = is_training
        UpperCamelCase_ = use_input_mask
        UpperCamelCase_ = use_token_type_ids
        UpperCamelCase_ = use_labels
        UpperCamelCase_ = vocab_size
        UpperCamelCase_ = hidden_size
        UpperCamelCase_ = num_hidden_layers
        UpperCamelCase_ = num_attention_heads
        UpperCamelCase_ = intermediate_size
        UpperCamelCase_ = hidden_act
        UpperCamelCase_ = hidden_dropout_prob
        UpperCamelCase_ = attention_probs_dropout_prob
        UpperCamelCase_ = max_position_embeddings
        UpperCamelCase_ = type_vocab_size
        UpperCamelCase_ = type_sequence_label_size
        UpperCamelCase_ = initializer_range
        UpperCamelCase_ = num_labels
        UpperCamelCase_ = num_choices
        UpperCamelCase_ = scope
        UpperCamelCase_ = range_bbox

    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        # Build config + synthetic inputs; normalizes bbox coords to be legal.
        UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # convert bbox to numpy since TF does not support item assignment
        UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    # swap y-coordinates so each box is top-left / bottom-right ordered
                    UpperCamelCase_ = bbox[i, j, 3]
                    UpperCamelCase_ = bbox[i, j, 1]
                    UpperCamelCase_ = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    # swap x-coordinates likewise
                    UpperCamelCase_ = bbox[i, j, 2]
                    UpperCamelCase_ = bbox[i, j, 0]
                    UpperCamelCase_ = t
        UpperCamelCase_ = tf.convert_to_tensor(_UpperCAmelCase )
        UpperCamelCase_ = None
        if self.use_input_mask:
            UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase_ = None
        if self.use_token_type_ids:
            UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase_ = None
        UpperCamelCase_ = None
        UpperCamelCase_ = None
        if self.use_labels:
            UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase_ = LayoutLMConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
        # Base model: exercise the three call signatures, check output shapes.
        UpperCamelCase_ = TFLayoutLMModel(config=_UpperCAmelCase )
        UpperCamelCase_ = model(_UpperCAmelCase , _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
        UpperCamelCase_ = model(_UpperCAmelCase , _UpperCAmelCase , token_type_ids=_UpperCAmelCase )
        UpperCamelCase_ = model(_UpperCAmelCase , _UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
        # Masked-LM head: logits over the vocabulary at every position.
        UpperCamelCase_ = TFLayoutLMForMaskedLM(config=_UpperCAmelCase )
        UpperCamelCase_ = model(_UpperCAmelCase , _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
        # Sequence-classification head: one logit vector per example.
        UpperCamelCase_ = self.num_labels
        UpperCamelCase_ = TFLayoutLMForSequenceClassification(config=_UpperCAmelCase )
        UpperCamelCase_ = model(_UpperCAmelCase , _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
        # Token-classification head: per-token label logits.
        UpperCamelCase_ = self.num_labels
        UpperCamelCase_ = TFLayoutLMForTokenClassification(config=_UpperCAmelCase )
        UpperCamelCase_ = model(_UpperCAmelCase , _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
        # Question-answering head: start/end logits over sequence positions.
        UpperCamelCase_ = TFLayoutLMForQuestionAnswering(config=_UpperCAmelCase )
        UpperCamelCase_ = model(_UpperCAmelCase , _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _UpperCAmelCase ( self ) -> int:
        # Repackage config + inputs into the kwargs dict used by common tests.
        UpperCamelCase_ = self.prepare_config_and_inputs()
        (
            (
                UpperCamelCase_
            ) , (
                UpperCamelCase_
            ) , (
                UpperCamelCase_
            ) , (
                UpperCamelCase_
            ) , (
                UpperCamelCase_
            ) , (
                UpperCamelCase_
            ) , (
                UpperCamelCase_
            ) , (
                UpperCamelCase_
            ) ,
        ) = config_and_inputs
        UpperCamelCase_ = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class _a ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """Common-test wiring for TF LayoutLM: registers the model/pipeline classes
    and delegates each check to the model tester above.

    NOTE(review): every class attribute below is assigned to the same mangled
    name ``A_`` — each assignment shadows the previous one so only the final
    ``10`` survives. These were presumably ``all_model_classes`` /
    ``pipeline_model_mapping`` and mixin flags — restore before use.
    """

    A_ = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    A_ = (
        {
            """feature-extraction""": TFLayoutLMModel,
            """fill-mask""": TFLayoutLMForMaskedLM,
            """text-classification""": TFLayoutLMForSequenceClassification,
            """token-classification""": TFLayoutLMForTokenClassification,
            """zero-shot""": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    A_ = False
    A_ = True
    A_ = 10

    def _UpperCAmelCase ( self ) -> Dict:
        # Set up the shared model tester and the config tester.
        UpperCamelCase_ = TFLayoutLMModelTester(self )
        UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )

    def _UpperCAmelCase ( self ) -> Optional[int]:
        # Run the generic config sanity checks.
        self.config_tester.run_common_tests()

    def _UpperCAmelCase ( self ) -> Optional[int]:
        UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def _UpperCAmelCase ( self ) -> Dict:
        UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )

    def _UpperCAmelCase ( self ) -> List[str]:
        UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )

    def _UpperCAmelCase ( self ) -> str:
        UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )

    def _UpperCAmelCase ( self ) -> List[Any]:
        UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )

    @slow
    def _UpperCAmelCase ( self ) -> str:
        # Smoke-test loading the first published checkpoint.
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase_ = TFLayoutLMModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )

    @unittest.skip('Onnx compliancy broke with TF 2.10' )
    def _UpperCAmelCase ( self ) -> Any:
        pass
def prepare_layoutlm_batch_inputs():
    """Return a fixed two-sequence LayoutLM batch.

    Returns:
        tuple: ``(input_ids, attention_mask, bbox, token_type_ids, labels)``
        as TF tensors, matching the order the integration tests unpack.

    Fix: every tensor was assigned to the throwaway name ``UpperCamelCase_``
    and the function itself was misnamed ``_snake_case`` while every call site
    uses ``prepare_layoutlm_batch_inputs`` — both restored.
    """
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]]) # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class _a ( unittest.TestCase ):
    """Slow integration tests: run the pretrained
    ``microsoft/layoutlm-base-uncased`` heads on the fixed batch above and
    compare against recorded output slices/shapes.

    NOTE(review): results are assigned to the mangled name ``UpperCamelCase_``
    and then read back under real names (``outputs``, ``loss``, ``logits``) —
    restore the original identifiers before running.
    """

    @slow
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        # Base model: check slices of last_hidden_state and pooler_output.
        UpperCamelCase_ = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
        UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = prepare_layoutlm_batch_inputs()
        # forward pass
        UpperCamelCase_ = model(input_ids=_UpperCAmelCase , bbox=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
        # test the sequence output on [0, :3, :3]
        UpperCamelCase_ = tf.convert_to_tensor(
            [[0.1_7_8_5, -0.1_9_4_7, -0.0_4_2_5], [-0.3_2_5_4, -0.2_8_0_7, 0.2_5_5_3], [-0.5_3_9_1, -0.3_3_2_2, 0.3_3_6_4]] , )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1e-3 ) )
        # test the pooled output on [1, :3]
        UpperCamelCase_ = tf.convert_to_tensor([-0.6_5_8_0, -0.0_2_1_4, 0.8_5_5_2] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _UpperCAmelCase , atol=1e-3 ) )

    @slow
    def _UpperCAmelCase ( self ) -> str:
        # initialize model with randomly initialized sequence classification head
        UpperCamelCase_ = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
        UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = prepare_layoutlm_batch_inputs()
        # forward pass
        UpperCamelCase_ = model(
            input_ids=_UpperCAmelCase , bbox=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , )
        # test whether we get a loss as a scalar
        UpperCamelCase_ = outputs.loss
        UpperCamelCase_ = (2,)
        self.assertEqual(loss.shape , _UpperCAmelCase )
        # test the shape of the logits
        UpperCamelCase_ = outputs.logits
        UpperCamelCase_ = (2, 2)
        self.assertEqual(logits.shape , _UpperCAmelCase )

    @slow
    def _UpperCAmelCase ( self ) -> Optional[int]:
        # initialize model with randomly initialized token classification head
        UpperCamelCase_ = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
        UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = prepare_layoutlm_batch_inputs()
        # forward pass
        UpperCamelCase_ = model(
            input_ids=_UpperCAmelCase , bbox=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
        # test the shape of the logits
        UpperCamelCase_ = outputs.logits
        UpperCamelCase_ = tf.convert_to_tensor((2, 25, 13) )
        self.assertEqual(logits.shape , _UpperCAmelCase )

    @slow
    def _UpperCAmelCase ( self ) -> Tuple:
        # initialize model with randomly initialized token classification head
        UpperCamelCase_ = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
        UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = prepare_layoutlm_batch_inputs()
        # forward pass
        UpperCamelCase_ = model(input_ids=_UpperCAmelCase , bbox=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
        # test the shape of the logits
        UpperCamelCase_ = tf.convert_to_tensor((2, 25) )
        self.assertEqual(outputs.start_logits.shape , _UpperCAmelCase )
        self.assertEqual(outputs.end_logits.shape , _UpperCAmelCase )
| 23 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy import structure for the MLuke tokenizer subpackage.
# Fix: the structure dict and the list of exported names were both assigned to
# the same mangled name, leaving `_import_structure` undefined at the
# _LazyModule call below, and the module was never installed in sys.modules.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Tokenizer is only importable when sentencepiece is installed.
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Defer all submodule imports until first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 0 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    """files""" , [
        ["""full:README.md""", """dataset_infos.json"""],
        ["""empty:README.md""", """dataset_infos.json"""],
        ["""dataset_infos.json"""],
        ["""full:README.md"""],
    ] , )
def test_from_dir ( tmp_path_factory , files ):
    """DatasetInfosDict.from_directory must read dataset_size from README.md
    YAML front matter or from the legacy dataset_infos.json.

    Fix: both parameters were named ``_a`` (duplicate-argument SyntaxError) and
    the body referenced undefined names; pytest fixture/param names restored,
    and the function renamed so pytest can collect it.
    """
    dataset_infos_dir = tmp_path_factory.mktemp("""dset_infos_dir""" )
    if "full:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""""" )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
            f.write("""{\"default\": {\"dataset_size\": 42}}""" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
    """dataset_info""" , [
        DatasetInfo(),
        DatasetInfo(
            description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
    ] , )
def test_dataset_info_dump_and_reload ( tmp_path , dataset_info ):
    """A DatasetInfo written to a directory must reload equal and leave a
    dataset_info.json file behind.

    Fix: duplicate ``_a`` parameters and undefined fixture names restored.
    """
    dataset_info_dir = str(tmp_path )
    dataset_info.write_to_directory(dataset_info_dir )
    reloaded = DatasetInfo.from_directory(dataset_info_dir )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(dataset_info_dir , """dataset_info.json""" ) )
def test_dataset_info_to_yaml_dict ():
    """``_to_yaml_dict`` must expose exactly the whitelisted keys with
    YAML-safe values and round-trip through yaml dump/load.

    Fix: every intermediate was assigned to the throwaway name ``A`` while the
    assertions read the real names — identifiers restored.
    """
    dataset_info = DatasetInfo(
        description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty ():
    """An empty DatasetInfo serializes to an empty YAML dict."""
    # Fix: intermediates were assigned to the throwaway name ``A`` while the
    # assertion read the real name — identifiers restored.
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    """dataset_infos_dict""" , [
        DatasetInfosDict(),
        DatasetInfosDict({"""default""": DatasetInfo()} ),
        DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
        DatasetInfosDict(
            {
                """default""": DatasetInfo(
                    description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
            } ),
        DatasetInfosDict(
            {
                """v1""": DatasetInfo(dataset_size=4_2 ),
                """v2""": DatasetInfo(dataset_size=1_3_3_7 ),
            } ),
    ] , )
def test_dataset_infos_dict_dump_and_reload ( tmp_path , dataset_infos_dict ):
    """Round-trip a DatasetInfosDict through a directory, normalizing each
    entry for what the YAML representation can actually carry.

    Fix: duplicate ``_a`` parameters, undefined fixture names, and the two
    in-loop assignments (``config_name`` attribute / dict entry) restored.
    """
    dataset_infos_dir = str(tmp_path )
    dataset_infos_dict.write_to_directory(dataset_infos_dir )
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir , """README.md""" ) )
| 617 |
def UpperCAmelCase_ ( graph ):
    """Topological sort via Kahn's algorithm.

    Args:
        graph: adjacency list as a dict mapping vertex ``0..n-1`` to a list of
            successor vertices.

    Returns:
        list | None: vertices in topological order (also printed), or ``None``
        after printing ``'Cycle exists'`` when the graph is cyclic.

    Fix: all working variables were assigned to the throwaway name
    ``lowercase`` while the algorithm read ``indegree``/``queue``/``topo``/
    ``cnt``/``vertex`` — guaranteed NameError. The sorted order is now also
    returned (previously implicit None) so callers can use the result.
    """
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    # Count incoming edges per vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    # Seed the queue with all roots (no incoming edges).
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    # Repeatedly emit a root and release its successors.
    # NOTE: list.pop(0) is O(n); fine for small graphs, use deque for large ones.
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print('Cycle exists' )
        return None
    print(topo )
    return topo
# Adjacency List of Graph
UpperCAmelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
# Fix: the demo previously called `topological_sort(graph)` — neither name is
# defined in this module; invoke the sort function defined above instead.
UpperCAmelCase_(UpperCAmelCase)
| 84 | 0 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 325 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy import structure for the GPT-Neo subpackage.
# Fix: each optional-dependency branch overwrote the same mangled name instead
# of extending `_import_structure`, which was then undefined at the
# _LazyModule call; the module was also never installed in sys.modules.
_import_structure = {
    'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch models are only importable when torch is installed.
    _import_structure['modeling_gpt_neo'] = [
        'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTNeoForCausalLM',
        'GPTNeoForQuestionAnswering',
        'GPTNeoForSequenceClassification',
        'GPTNeoForTokenClassification',
        'GPTNeoModel',
        'GPTNeoPreTrainedModel',
        'load_tf_weights_in_gpt_neo',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax models are only importable when flax is installed.
    _import_structure['modeling_flax_gpt_neo'] = [
        'FlaxGPTNeoForCausalLM',
        'FlaxGPTNeoModel',
        'FlaxGPTNeoPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Defer all submodule imports until first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Fix: the path was assigned to a throwaway name, leaving `git_repo_path`
# undefined on the next line.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def snake_case (UpperCAmelCase__ ) -> Any:
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def snake_case (parser ) -> List[str]:
    """Forward pytest's option parser to the shared transformers helper.

    Mirrors the ``pytest_addoption`` hook. Fix: the parameter was unused and
    the undefined mangled name was passed through instead.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )
def snake_case (UpperCAmelCase__ ) -> List[Any]:
from transformers.testing_utils import pytest_terminal_summary_main
UpperCamelCase_: Optional[Any] = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(__SCREAMING_SNAKE_CASE , id=__SCREAMING_SNAKE_CASE )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]:
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
UpperCamelCase_: Dict = 0
# Doctest custom flag to ignore output.
A_ : Optional[Any] = doctest.register_optionflag('IGNORE_RESULT')
A_ : List[str] = doctest.OutputChecker
class _lowerCAmelCase( __lowerCamelCase ):
"""simple docstring"""
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : str = CustomOutputChecker
# Route pytest's doctest collection through the HF-aware module and parser.
# Fix: these were bare assignments to throwaway names; the imported hooks were
# never actually installed.
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A_ ( BaseOutput ):
    """Output of the Flax ControlNet: residuals added to the UNet's skip
    connections (one tensor per down-block resolution) and to its mid-block.

    Fix: both fields previously shared the mangled name ``_UpperCamelCase``
    (only one field survived); the names used at the construction site in
    ``__call__`` (``down_block_res_samples`` / ``mid_block_res_sample``) are
    restored, along with the ``BaseOutput`` base imported above.
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class A_ ( nn.Module ):
    """Embeds the conditioning image into the ControlNet feature space: a conv
    stem, alternating stride-1/stride-2 convs (SiLU between), and a
    zero-initialized projection.

    Fix: the dataclass fields and all ``setup`` results were assigned to
    mangled placeholder names while ``__call__`` read ``self.conv_in`` /
    ``self.blocks`` / ``self.conv_out``; field names are restored from the
    keyword arguments used at the instantiation site, the setup hook is given
    its required flax name, and the undefined ``jnp.floataa`` default is
    corrected to ``jnp.float32``.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup( self ):
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            # stride-1 conv keeps resolution at the current width...
            conv1 = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv1 )
            # ...then a stride-2 conv downsamples while widening channels.
            conv2 = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv2 )
        self.blocks = blocks
        # Zero-init so the embedding initially contributes nothing.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self , conditioning ):
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
@flax_register_to_config
class A_ ( nn.Module , __lowerCamelCase , __lowerCamelCase ):
    """Flax ControlNet: a UNet encoder copy whose per-resolution and mid-block
    outputs become residuals that condition a diffusion UNet.

    NOTE(review): this class is heavily name-mangled and cannot run as-is —
    every dataclass field is annotated under the same name ``_UpperCamelCase``
    (only the last survives), ``__call__`` repeats the parameter name
    ``snake_case`` (SyntaxError), and locals are assigned to ``lowercase``
    while later statements read real names (``params_rng``, ``sample``,
    ``down_block_res_samples`` ...). Restore the original identifiers before
    use; only the structure is documented here.
    """

    _UpperCamelCase : int = 32
    _UpperCamelCase : int = 4
    _UpperCamelCase : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _UpperCamelCase : Union[bool, Tuple[bool]] = False
    _UpperCamelCase : Tuple[int] = (320, 640, 1280, 1280)
    _UpperCamelCase : int = 2
    _UpperCamelCase : Union[int, Tuple[int]] = 8
    _UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
    _UpperCamelCase : int = 1280
    _UpperCamelCase : float = 0.0
    _UpperCamelCase : bool = False
    _UpperCamelCase : jnp.dtype = jnp.floataa
    _UpperCamelCase : bool = True
    _UpperCamelCase : int = 0
    _UpperCamelCase : str = "rgb"
    _UpperCamelCase : Tuple[int] = (16, 32, 96, 256)

    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        # Weight init: build zero dummy inputs of the configured sizes and run
        # flax `init` to materialize the parameter tree.
        # init input tensors
        lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
        lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
        lowercase = jnp.ones((1,) , dtype=jnp.intaa )
        lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
        lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
        lowercase , lowercase = jax.random.split(snake_case )
        lowercase = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(snake_case , snake_case , snake_case , snake_case , snake_case )["params"]

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Module construction: conv stem, time embedding, conditioning
        # embedding, down blocks plus zero-initialized controlnet convs, and
        # the mid block.
        lowercase = self.block_out_channels
        lowercase = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowercase = self.num_attention_heads or self.attention_head_dim

        # input
        lowercase = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        # time
        lowercase = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        lowercase = FlaxTimestepEmbedding(snake_case , dtype=self.dtype )

        lowercase = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )

        lowercase = self.only_cross_attention
        if isinstance(snake_case , snake_case ):
            lowercase = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(snake_case , snake_case ):
            lowercase = (num_attention_heads,) * len(self.down_block_types )

        # down
        lowercase = []
        lowercase = []
        lowercase = block_out_channels[0]
        # Each controlnet conv is zero-initialized so residuals start at zero.
        lowercase = nn.Conv(
            snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(snake_case )
        for i, down_block_type in enumerate(self.down_block_types ):
            lowercase = output_channel
            lowercase = block_out_channels[i]
            lowercase = i == len(snake_case ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowercase = FlaxCrossAttnDownBlockaD(
                    in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                lowercase = FlaxDownBlockaD(
                    in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(snake_case )
            # One zero-init conv per resnet layer of the down block...
            for _ in range(self.layers_per_block ):
                lowercase = nn.Conv(
                    snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(snake_case )
            # ...plus one for the downsampler on all but the last block.
            if not is_final_block:
                lowercase = nn.Conv(
                    snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(snake_case )
        lowercase = down_blocks
        lowercase = controlnet_down_blocks

        # mid
        lowercase = block_out_channels[-1]
        lowercase = FlaxUNetMidBlockaDCrossAttn(
            in_channels=snake_case , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        lowercase = nn.Conv(
            snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1.0 , snake_case = True , snake_case = False , ):
        # Forward pass: embed timestep, add conditioning image embedding to the
        # sample, run down + mid blocks, project every residual through its
        # zero-initialized controlnet conv, then scale by conditioning_scale.
        lowercase = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            lowercase = jnp.flip(snake_case , axis=1 )

        # 1. time
        if not isinstance(snake_case , jnp.ndarray ):
            lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowercase = timesteps.astype(dtype=jnp.floataa )
            lowercase = jnp.expand_dims(snake_case , 0 )
        lowercase = self.time_proj(snake_case )
        lowercase = self.time_embedding(snake_case )

        # 2. pre-process
        lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
        lowercase = self.conv_in(snake_case )
        lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
        lowercase = self.controlnet_cond_embedding(snake_case )
        sample += controlnet_cond

        # 3. down
        lowercase = (sample,)
        for down_block in self.down_blocks:
            if isinstance(snake_case , snake_case ):
                lowercase , lowercase = down_block(snake_case , snake_case , snake_case , deterministic=not train )
            else:
                lowercase , lowercase = down_block(snake_case , snake_case , deterministic=not train )
            down_block_res_samples += res_samples

        # 4. mid
        lowercase = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train )

        # 5. contronet blocks
        lowercase = ()
        for down_block_res_sample, controlnet_block in zip(snake_case , self.controlnet_down_blocks ):
            lowercase = controlnet_block(snake_case )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        lowercase = controlnet_down_block_res_samples
        lowercase = self.controlnet_mid_block(snake_case )

        # 6. scaling
        lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=snake_case , mid_block_res_sample=snake_case )
| 84 | 0 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class snake_case(__lowerCamelCase):
    """Minimal one-step UNet pipeline (smoke-test pipeline).

    Runs a single denoising step through the registered UNet and scheduler,
    then returns an all-ones tensor shaped like the scheduler output.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        # register_modules makes save/load and device placement work through
        # the DiffusionPipeline machinery.
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Random latent at the UNet's native channel count and resolution.
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(noise, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, noise).prev_sample
        # (x - x) + ones == ones: exercises the full forward pass while
        # producing a deterministic result.
        # Fixes: `result` was never bound in the dump, and the original return
        # line had table-dump residue (`| 626 |`) fused into it.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): obfuscated dump — only the value '''true''' survived; the
# upstream accelerate test script sets
# os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true" at this point.
# Confirm the intended assignment target before relying on this constant.
UpperCAmelCase = '''true'''
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Build a RegressionModel/dataset pair and prepare a DDP copy.

    Returns (model, ddp_model, dataloader): `model` is the un-wrapped reference
    copy moved to the accelerator device; `ddp_model` and `dataloader` are
    prepared by the accelerator.

    NOTE(review): restored from the obfuscated dump — the def was named
    `UpperCAmelCase_` while the caller (test_torch_metrics) invokes
    `get_basic_setup`, and every local was bound to the name `lowercase`.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """Tokenize GLUE/MRPC validation split and return its DataLoader.

    `use_longest` switches the collate padding strategy between 'longest'
    and fixed 'max_length' (128).

    NOTE(review): restored from the obfuscated dump — the def was named
    `UpperCAmelCase_` while get_mrpc_setup calls `get_dataloader`;
    keyword values (truncation=True, max_length=None, batched=True,
    shuffle=False) follow the upstream accelerate test script.
    """
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(examples):
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # main_process_first so the tokenized cache is written exactly once.
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Create baseline and DDP-prepared MRPC model/dataloader pairs.

    Returns ({"ddp": [...], "no": [...]}, accelerator): the "no" entry holds
    the un-prepared model/dataloader plus the accelerator device; the "ddp"
    entry holds the prepared pair pinned to "cuda:0".

    NOTE(review): restored from the obfuscated dump — the def was named
    `UpperCAmelCase_` while test_mrpc calls `get_mrpc_setup`.
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run `model` over `dataloader`, gathering (logits, targets) across processes.

    Returns two concatenated tensors (logits, targs) covering the whole
    dataset, with duplicates stripped by `gather_for_metrics`.

    NOTE(review): restored from the obfuscated dump — the def was named
    `UpperCAmelCase_` while test_torch_metrics calls `generate_predictions`.
    """
    logits_and_targets = []
    for batch in dataloader:
        inp, target = batch.values()
        with torch.no_grad():
            logit = model(inp)
        # gather_for_metrics drops the samples duplicated by distributed padding.
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check that gather_for_metrics returns exactly `num_samples` predictions.

    NOTE(review): restored from the obfuscated dump — the def was named
    `UpperCAmelCase_` while main() calls `test_torch_metrics`.
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}'''
def test_mrpc(dispatch_batches=False, split_batches=False):
    """Compare single-process and distributed MRPC metrics; they must match.

    Runs the same evaluation once on the un-prepared ("no") setup and once on
    the DDP-prepared setup, then asserts accuracy and f1 agree.

    NOTE(review): restored from the obfuscated dump — the def was named
    `UpperCAmelCase_` while main() calls `test_mrpc`, and every local was
    bound to the name `lowercase`.
    """
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['labels']
        # Gather across processes before feeding the metric.
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    """Drive the metric tests across all split/dispatch batch configurations.

    NOTE(review): restored from the obfuscated dump — the def was named
    `UpperCAmelCase_` while the entry guard and _mp_fn call `main`.
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    # Quieten non-main processes to keep logs readable.
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
                test_mrpc(dispatch_batches, split_batches)
                # Reset global accelerator state between configurations.
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    """Entry point for TPU spawning (xla_spawn passes a process index).

    NOTE(review): restored from the obfuscated dump — `_mp_fn` is the name
    xla_spawn looks up by convention; the dump had `UpperCAmelCase_`.
    """
    # For xla_spawn (TPUs)
    main()
# Script entry point; TPU launchers instead enter through _mp_fn above.
if __name__ == "__main__":
    main()
| 84 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): restored from the obfuscated dump — all four module constants
# were bound to the single name `lowerCAmelCase__` (each shadowing the last),
# while the tokenizer class below reads `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
logger = logging.get_logger(__name__)

# Expected filename of the on-disk SentencePiece model.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

# Hub locations of the pretrained vocab files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

# Maximum input lengths for the pretrained checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class SCREAMING_SNAKE_CASE__(PreTrainedTokenizer):
    """RemBERT-style SentencePiece tokenizer.

    NOTE(review): reconstructed from an obfuscated dump. Fixes applied:
    - base class was the undefined placeholder `__lowerCamelCase`; this file
      imports `PreTrainedTokenizer`, which the API below (special-token ids,
      added_tokens_encoder, save_vocabulary contract) belongs to;
    - every method was named `lowercase__` (mutually shadowing); restored the
      tokenizer-API names the base class dispatches to;
    - instance attributes were assigned to throwaway names
      (`lowerCAmelCase : ... = x`) instead of `self.x`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return token->id mapping, including tokens added after training."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is a C++ handle and is not picklable;
        # drop it and reload from vocab_file on unpickling.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Split `text` into SentencePiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for `[CLS] A [SEP]`, 1 for `B [SEP]` when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        # The dump collapsed both sequences to one name here; the second
        # segment must count token_ids_1 (upstream RemBERT behavior).
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the SentencePiece model into `save_directory`; returns its path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 645 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A_(ProcessorMixin):
    """Processor bundling an OwlViT image processor and a CLIP tokenizer.

    NOTE(review): reconstructed from an obfuscated dump. Fixes applied:
    - base class was the undefined placeholder `__lowerCamelCase`; this file
      imports `ProcessorMixin`, whose attribute contract
      (`attributes`/`image_processor_class`/`tokenizer_class`) the class body
      clearly follows;
    - every local was assigned to the name `lowercase` while the code reads
      the real names (`feature_extractor`, `encodings`, `input_ids`, ...);
    - the post-process/decode wrappers were all named
      `SCREAMING_SNAKE_CASE__`; restored the names their one-line bodies
      delegate to.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize `text` (possibly nested query lists) and/or process images.

        Returns a BatchEncoding containing any of input_ids/attention_mask,
        query_pixel_values, pixel_values, depending on which inputs were given.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.'
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')

            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)
            else:
                raise ValueError('Target return tensor type could not be returned')

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to the image processor's post_process."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to the image processor's post_process_object_detection."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to the image processor's post_process_image_guided_detection."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 84 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.