from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    """Configuration class for the Autoformer time-series forecasting model."""

    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
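

# Usage sketch (illustrative; not part of the original file — the values below
# are examples): build a config for monthly data and read a mapped attribute.
#
#   config = AutoformerConfig(prediction_length=24, num_time_features=1)
#   config.context_length   # defaults to prediction_length -> 24
#   config.hidden_size      # attribute_map routes this to d_model -> 64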
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: bubble the largest remaining element to the end,
    then recurse on the shorter prefix until a pass makes no swaps.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([])
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import Optional

import numpy as np
import torch
from torch import nn

from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 based text decoder that turns a (CLIP-style) prefix embedding into a caption."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate one caption per prefix feature using beam search."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # stopped beams keep their score and can only emit token 0
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
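

# Usage sketch (illustrative; toy sizes, not part of the original file):
#
#   decoder = UniDiffuserTextDecoder(
#       prefix_length=10, prefix_inner_dim=32, prefix_hidden_dim=32,
#       vocab_size=100, n_positions=64, n_embd=32, n_layer=2, n_head=2,
#   )
#   input_ids = torch.randint(0, 100, (1, 5))
#   prefix = torch.randn(1, 10, 32)
#   out, hidden = decoder(input_ids, prefix)  # (out, hidden) because prefix_hidden_dim is set
#   out.logits.shape                          # (1, 15, 100): prefix_length + seq_len positions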
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search, or log softmax for each vocabulary token when using beam search.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""


class FlaxLogitsProcessor:
    """Abstract base class for all logits processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logits warpers that can be applied during generation with sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """
    A list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] objects that are applied to the `scores` tensor in
    order, forwarding any extra kwargs each processor requires.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature: rescales the score distribution by `1 / temperature`."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] keeping the smallest set of tokens whose cumulative probability exceeds `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] keeping only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces the specified token as the last one when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a minimum length by masking the EOS token until `min_length` is reached."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens while generation is at its `begin_index` step."""

    def __init__(self, begin_suppress_tokens, begin_index: int):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] implementing Whisper's timestamp decoding rules."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
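

# Usage sketch (illustrative; toy shapes and token ids, not part of the
# original file): generation loops chain processors through
# FlaxLogitsProcessorList and apply them once per decoding step.
#
#   processors = FlaxLogitsProcessorList()
#   processors.append(FlaxTemperatureLogitsWarper(temperature=0.7))
#   processors.append(FlaxTopKLogitsWarper(top_k=5))
#   processors.append(FlaxMinLengthLogitsProcessor(min_length=3, eos_token_id=2))
#
#   input_ids = jnp.zeros((2, 4), dtype=jnp.int32)   # dummy prompt, batch of 2
#   scores = jnp.ones((2, 10))                       # logits over a 10-token vocab
#   scores = processors(input_ids, scores, cur_len=4)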
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    """Configuration class for the MGP-STR scene-text recognition model."""

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
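

# Usage sketch (illustrative; not part of the original file):
#
#   config = MgpstrConfig()        # library defaults, as listed above
#   config.num_character_labels    # -> 38
#   config.max_token_length        # -> 27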
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment

import os
import platform
import sys


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging before any TF import

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    """Configuration class for the BEiT model."""

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
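

# Usage sketch (illustrative; not part of the original file): the ONNX export
# machinery reads the named inputs, their dynamic axes, and the validation
# tolerance from BeitOnnxConfig.
#
#   onnx_config = BeitOnnxConfig(BeitConfig())
#   onnx_config.inputs               # OrderedDict([('pixel_values', {0: 'batch', ...})])
#   onnx_config.atol_for_validation  # 1e-4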
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working.
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}


def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    """Download the standard COCO cats test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    """Build the TF-name -> HF-name mapping for every transferable parameter."""
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original TF model's weights into our EfficientNet structure."""
    # Load original model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
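

# Example invocation (illustrative; assumes the script is saved as
# convert_efficientnet_to_pytorch.py — the filename is not given above):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model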
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Validate that the rows and columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    # Binary search for the index of the first negative number in a decreasing array.
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    # Uses binary search per row; the bound shrinks as rows descend, since each
    # row's negative boundary can only move left relative to the row above.
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    # Checks every number in the grid.
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    # Like the brute force above, but stops at the first negative number in each
    # row, since everything after it is also negative in a decreasing row.
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
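    # Quick cross-check (added for illustration; not part of the original file):
    # all three counting strategies agree on a small hand-written grid.
    sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert (
        count_negatives_binary_search(sample)
        == count_negatives_brute_force(sample)
        == count_negatives_brute_force_with_break(sample)
        == 8
    )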
| 319
| 0
|
import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
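# Example invocation (added note; the script name and paths are hypothetical):
#   python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/model.pt ./m2m100-hf
# The two positional arguments map to `fairseq_path` and `pytorch_dump_folder_path` above.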
| 363
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
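# Note (added for clarity): DialoGPT fine-tuned checkpoints store the LM head as
# "lm_head.decoder.weight", while HF GPT-2 ties the head to the input embeddings
# and expects "lm_head.weight"; the conversion is just that single key rename.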
| 319
| 0
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve over a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
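    # Quick sanity check (added for illustration; not part of the original file):
    # a degree-1 curve is linear interpolation, so t=0.5 on the curve through
    # (1, 2) and (3, 5) lands on the midpoint.
    assert BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)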
| 364
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
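# Note (added for clarity): with _LazyModule in place, `from transformers import
# MLukeTokenizer` only triggers the sentencepiece-backed import on first attribute
# access, which keeps `import transformers` itself cheap.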
| 319
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = OPTConfig
UpperCamelCase_ : Tuple = {}
UpperCamelCase_ : Dict = '''gelu'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=13 , UpperCAmelCase_ : Optional[Any]=7 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Optional[Any]=99 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : List[Any]=20 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : Optional[Any]=16 , ):
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = eos_token_id
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Dict = bos_token_id
SCREAMING_SNAKE_CASE : int = embed_dim
SCREAMING_SNAKE_CASE : Dict = word_embed_proj_dim
SCREAMING_SNAKE_CASE : List[str] = False
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=UpperCAmelCase_ , **self.config_updates , )
SCREAMING_SNAKE_CASE : Tuple = prepare_opt_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ )
return config, inputs_dict
def _A ( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Union[str, Any] = TFOPTModel(config=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE : Any = input_ids[:1, :]
SCREAMING_SNAKE_CASE : List[str] = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE : Dict = 1
# first forward pass
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE : int = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : List[str] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE : Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE : int = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-3 )
@require_tf
class SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
UpperCamelCase_ : Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
UpperCamelCase_ : Tuple = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : Dict = False
UpperCamelCase_ : int = False
UpperCamelCase_ : Union[str, Any] = 1_0
def _A ( self : str ):
SCREAMING_SNAKE_CASE : List[Any] = TFOPTModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=UpperCAmelCase_ )
def _A ( self : Dict ):
self.config_tester.run_common_tests()
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict ):
if hasattr(UpperCAmelCase_ , "weight" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(UpperCAmelCase_ , "weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(config=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = _get_word_embedding_weight(UpperCAmelCase_ , model.get_input_embeddings() )
SCREAMING_SNAKE_CASE : Optional[int] = _get_word_embedding_weight(UpperCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = _get_word_embedding_weight(UpperCAmelCase_ , model.get_input_embeddings() )
SCREAMING_SNAKE_CASE : int = _get_word_embedding_weight(UpperCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
SCREAMING_SNAKE_CASE : Union[str, Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , UpperCAmelCase_ )
# check that weights remain the same after resizing
SCREAMING_SNAKE_CASE : int = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = False
self.assertTrue(UpperCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
SCREAMING_SNAKE_CASE : Dict = False
self.assertTrue(UpperCAmelCase_ )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = 9_9
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : List[str] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
SCREAMING_SNAKE_CASE : List[str] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
SCREAMING_SNAKE_CASE : Tuple = input_ids.shape[0]
SCREAMING_SNAKE_CASE : Tuple = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Optional[Any] = TFOPTModel.from_pretrained("facebook/opt-350m" )
SCREAMING_SNAKE_CASE : str = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Dict = tf.not_equal(UpperCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
SCREAMING_SNAKE_CASE : List[Any] = model(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).last_hidden_state
SCREAMING_SNAKE_CASE : str = (1, 11, 512)
self.assertEqual(output.shape , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=4E-3 ) )
SCREAMING_SNAKE_CASE : Any = tf.function(UpperCAmelCase_ , jit_compile=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = xla_generate(UpperCAmelCase_ , UpperCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=4E-2 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : int ):
super().setUp()
SCREAMING_SNAKE_CASE : Any = "facebook/opt-350m"
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = TFOPTForCausalLM.from_pretrained(self.path_model )
SCREAMING_SNAKE_CASE : Dict = GPTaTokenizer.from_pretrained(self.path_model )
SCREAMING_SNAKE_CASE : int = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
SCREAMING_SNAKE_CASE : str = tokenizer(UpperCAmelCase_ , return_tensors="tf" , padding=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
SCREAMING_SNAKE_CASE : List[Any] = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-4 ) )
SCREAMING_SNAKE_CASE : int = tf.function(UpperCAmelCase_ , jit_compile=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-4 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@property
def _A ( self : Optional[int] ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = "facebook/opt-125m"
SCREAMING_SNAKE_CASE : List[str] = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaTokenizer.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = TFOPTForCausalLM.from_pretrained(UpperCAmelCase_ )
for prompt in self.prompts:
SCREAMING_SNAKE_CASE : Any = tokenizer(UpperCAmelCase_ , return_tensors="tf" ).input_ids
SCREAMING_SNAKE_CASE : Dict = model.generate(UpperCAmelCase_ , max_length=10 )
SCREAMING_SNAKE_CASE : Any = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[str] = "facebook/opt-350m"
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaTokenizer.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = TFOPTForCausalLM.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = "left"
# use different length sentences to test batching
SCREAMING_SNAKE_CASE : Optional[Any] = [
"Hello, my dog is a little",
"Today, I",
]
SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCAmelCase_ , return_tensors="tf" , padding=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = inputs["input_ids"]
SCREAMING_SNAKE_CASE : List[str] = model.generate(input_ids=UpperCAmelCase_ , attention_mask=inputs["attention_mask"] )
SCREAMING_SNAKE_CASE : int = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
SCREAMING_SNAKE_CASE : List[Any] = model.generate(input_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
SCREAMING_SNAKE_CASE : int = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(input_ids=UpperCAmelCase_ , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [non_padded_sentence, padded_sentence] )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : List[Any] = "facebook/opt-350m"
SCREAMING_SNAKE_CASE : str = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : Dict = GPTaTokenizer.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = TFOPTForCausalLM.from_pretrained(UpperCAmelCase_ )
for prompt in self.prompts:
SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCAmelCase_ , return_tensors="tf" ).input_ids
SCREAMING_SNAKE_CASE : int = model.generate(UpperCAmelCase_ , max_length=10 )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
| 365
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
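# Worked truth table (added for clarity): (input_1, input_2).count(1) != 0 is True
# whenever at least one input is 1, so only or_gate(0, 0) returns 0.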
| 319
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
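# A minimal usage sketch (added for illustration; not part of the original file):
# a 65x97 input is snapped down to multiples of size_divisor=32 and rescaled to [0, 1].
# import numpy as np
# processor = GLPNImageProcessor()
# batch = processor.preprocess(np.zeros((65, 97, 3), dtype=np.uint8), return_tensors="np")
# assert batch["pixel_values"].shape == (1, 3, 64, 96)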
| 366
|
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        # Initialize with a list of the number of items in each set
        # and with rank = 1 for each set.
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        # Merge two sets together using union by rank; returns True if the
        # sets were distinct and have been merged.
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        # Find the root of a set, compressing the path along the way.
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
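# A minimal usage sketch (added for illustration; not part of the original file):
# three sets of sizes 1, 2 and 3; merging sets 0 and 2 yields one set of size 4.
if __name__ == "__main__":
    disjoint_set = DisjointSet([1, 2, 3])
    disjoint_set.merge(0, 2)
    assert disjoint_set.get_parent(0) == disjoint_set.get_parent(2)
    assert disjoint_set.max_set == 4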
| 319
| 0
|
def sum_of_digits(n: int) -> int:
    # Iterative digit sum of abs(n).
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    # Recursive digit sum of abs(n).
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    # Digit sum via string conversion.
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    # Benchmark the three implementations with three different length int values.
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
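    # Worked example (added for clarity; not part of the original file): all three
    # implementations agree, e.g. sum_of_digits(262144) == 2+6+2+1+4+4 == 19.
    assert sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19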
| 367
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
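# A minimal usage sketch (added; the backbone name is illustrative, not from the
# original file):
# config = TimmBackboneConfig(backbone="resnet50", num_channels=3, out_indices=(-1,))
# exposes only the final feature map of a timm ResNet-50 backbone.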
| 319
| 0
|
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 368
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    # Sum of the proper divisors of n (divisors excluding n itself).
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    # Sum of all amicable numbers below the limit: i is amicable when
    # sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i.
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
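# Worked example (added for clarity): the smallest amicable pair is (220, 284),
# since sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so both
# numbers are counted by solution(10000).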
| 319
| 0
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = emb.weight.shape
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
SCREAMING_SNAKE_CASE : Optional[Any] = emb.weight.data
return lin_layer
def lowerCamelCase__ ( lowercase , lowercase=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for old_key in state_dict.keys():
SCREAMING_SNAKE_CASE : Any = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
SCREAMING_SNAKE_CASE : int = key.replace("moe_layer.experts.0" , F'''ffn.experts.expert_{expert_idx}''' )
else:
SCREAMING_SNAKE_CASE : str = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
SCREAMING_SNAKE_CASE : int = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
SCREAMING_SNAKE_CASE : Any = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
SCREAMING_SNAKE_CASE : Dict = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
SCREAMING_SNAKE_CASE : str = key.replace("final_layer_norm" , "ff_layer_norm" )
SCREAMING_SNAKE_CASE : List[str] = state_dict[old_key]
return new_dict
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase = WEIGHTS_NAME ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Tuple = 0
os.makedirs(_snake_case , exist_ok=_snake_case )
for expert in range(_snake_case ):
SCREAMING_SNAKE_CASE : Dict = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_snake_case ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(_snake_case )["model"]
remove_ignore_keys_(_snake_case )
SCREAMING_SNAKE_CASE : Dict = rename_fairseq_keys(_snake_case , _snake_case )
SCREAMING_SNAKE_CASE : Any = os.path.join(
_snake_case , weights_name.replace(".bin" , F'''-{len(_snake_case )+1:05d}-of-???.bin''' ) )
torch.save(_snake_case , _snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_snake_case )[0]].dtype )
# Add the last block
SCREAMING_SNAKE_CASE : int = os.path.join(_snake_case , weights_name.replace(".bin" , F'''-{len(_snake_case )+1:05d}-of-???.bin''' ) )
SCREAMING_SNAKE_CASE : Tuple = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(_snake_case )
SCREAMING_SNAKE_CASE : List[Any] = rename_fairseq_keys(_snake_case , _snake_case )
SCREAMING_SNAKE_CASE : Tuple = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_snake_case ) == 1:
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(_snake_case , _snake_case )
torch.save(_snake_case , _snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_snake_case , _snake_case )
# Otherwise, let's build the index
SCREAMING_SNAKE_CASE : List[str] = {}
for idx, shard in enumerate(_snake_case ):
SCREAMING_SNAKE_CASE : Dict = weights_name.replace(".bin" , F'''-{idx+1:05d}-of-{len(_snake_case ):05d}.bin''' )
SCREAMING_SNAKE_CASE : Any = os.path.join(_snake_case , weights_name.replace(".bin" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_snake_case , os.path.join(_snake_case , _snake_case ) )
for key in shard:
SCREAMING_SNAKE_CASE : Tuple = shard_file
# Add the metadata
SCREAMING_SNAKE_CASE : str = {"total_size": total_size}
SCREAMING_SNAKE_CASE : str = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_snake_case , _snake_case ) , "w" , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = json.dumps(_snake_case , indent=2 , sort_keys=_snake_case ) + "\n"
f.write(_snake_case )
return metadata, index
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
snake_case = parser.parse_args()
snake_case = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
snake_case = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
snake_case = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 369
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 319
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case = logging.get_logger(__name__)
snake_case = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class SCREAMING_SNAKE_CASE ( _a ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = '''deformable_detr'''
UpperCamelCase_ : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[str] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=300 , UpperCAmelCase_ : Any=1024 , UpperCAmelCase_ : int=6 , UpperCAmelCase_ : Dict=1024 , UpperCAmelCase_ : Optional[int]=8 , UpperCAmelCase_ : Any=6 , UpperCAmelCase_ : Optional[Any]=1024 , UpperCAmelCase_ : Tuple=8 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]="relu" , UpperCAmelCase_ : str=256 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Tuple=1.0 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : str="sine" , UpperCAmelCase_ : Optional[int]="resnet50" , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[str]=300 , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : str=5 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Optional[int]=1 , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : Any=5 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Any=0.25 , UpperCAmelCase_ : Dict=False , **UpperCAmelCase_ : Dict , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(_a , _a ):
SCREAMING_SNAKE_CASE : Dict = backbone_config.get("model_type" )
SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : int = config_class.from_dict(_a )
SCREAMING_SNAKE_CASE : str = use_timm_backbone
SCREAMING_SNAKE_CASE : List[str] = backbone_config
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = num_queries
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = d_model
SCREAMING_SNAKE_CASE : str = encoder_ffn_dim
SCREAMING_SNAKE_CASE : List[str] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = encoder_attention_heads
SCREAMING_SNAKE_CASE : Dict = decoder_ffn_dim
SCREAMING_SNAKE_CASE : List[Any] = decoder_layers
SCREAMING_SNAKE_CASE : List[str] = decoder_attention_heads
SCREAMING_SNAKE_CASE : Tuple = dropout
SCREAMING_SNAKE_CASE : List[Any] = attention_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = activation_function
SCREAMING_SNAKE_CASE : List[str] = init_std
SCREAMING_SNAKE_CASE : str = init_xavier_std
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_loss
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : Union[str, Any] = backbone
SCREAMING_SNAKE_CASE : int = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Optional[int] = dilation
# deformable attributes
SCREAMING_SNAKE_CASE : Dict = num_feature_levels
SCREAMING_SNAKE_CASE : int = encoder_n_points
SCREAMING_SNAKE_CASE : Optional[int] = decoder_n_points
SCREAMING_SNAKE_CASE : str = two_stage
SCREAMING_SNAKE_CASE : Tuple = two_stage_num_proposals
SCREAMING_SNAKE_CASE : Optional[Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
SCREAMING_SNAKE_CASE : str = class_cost
SCREAMING_SNAKE_CASE : int = bbox_cost
SCREAMING_SNAKE_CASE : Optional[int] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : int = mask_loss_coefficient
SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE : List[str] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient
SCREAMING_SNAKE_CASE : List[str] = eos_coefficient
SCREAMING_SNAKE_CASE : Tuple = focal_alpha
SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels
super().__init__(is_encoder_decoder=_a , **_a )
@property
def _A ( self : List[Any] ):
return self.encoder_attention_heads
@property
def _A ( self : Dict ):
return self.d_model
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE : Any = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : List[Any] = self.__class__.model_type
return output
| 370
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
snake_case = None
snake_case = logging.get_logger(__name__)
snake_case = """▁"""
snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
snake_case = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
snake_case = {
"""google/pegasus-xsum""": 512,
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES
UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = PegasusTokenizer
UpperCamelCase_ : str = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ):
SCREAMING_SNAKE_CASE : Optional[Any] = offset
if additional_special_tokens is not None:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise TypeError(
f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is'''
f''' {type(UpperCAmelCase_ )}''' )
SCREAMING_SNAKE_CASE : Optional[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 )
]
if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True
def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(UpperCAmelCase_ )
elif token_ids_a is None:
return self._special_token_mask(UpperCAmelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE : List[str] = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
| 319
| 0
|
import datasets
_CITATION = """\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"""
_DESCRIPTION = """\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"""
_KWARGS_DESCRIPTION = """\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"""


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 371
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 319
| 0
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 350
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case = 16
snake_case = 32
def lowerCamelCase__ ( lowercase , lowercase = 16 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : List[Any] = datasets.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE : str = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE : Optional[Any] = 8
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
return tokenizer.pad(
lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
SCREAMING_SNAKE_CASE : Dict = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
snake_case = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1":
SCREAMING_SNAKE_CASE : int = 2
# New Code #
SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : Any = config["lr"]
SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] )
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" )
set_seed(lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
            # Gradient accumulation on TPUs is currently unsupported and not advised: bugs were found on the XLA side when running these tests.
with accelerator.accumulate(lowercase ):
SCREAMING_SNAKE_CASE : Any = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = output.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowercase , references=lowercase , )
SCREAMING_SNAKE_CASE : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
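# The `accelerator.accumulate(model)` context above hides the bookkeeping that manual
# gradient accumulation otherwise requires. For comparison, a minimal sketch of the
# manual equivalent in plain PyTorch (illustrative names, not part of this script):
def train_one_epoch_with_manual_accumulation(model, train_dataloader, optimizer, accumulation_steps=4):
    model.train()
    optimizer.zero_grad()
    for step, batch in enumerate(train_dataloader):
        loss = model(**batch).loss
        # Scale so the summed gradients match a single large-batch backward pass.
        (loss / accumulation_steps).backward()
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()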
| 319
| 0
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
snake_case = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
snake_case = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
snake_case = [file for file in filepaths if """ """ in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
snake_case = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
snake_case = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
snake_case = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 351
|
import functools
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
if not isinstance(lowercase , lowercase ) or not all(isinstance(lowercase , lowercase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(lowercase ) != 3 or not all(isinstance(lowercase , lowercase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(lowercase ) == 0:
return 0
if min(lowercase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(lowercase ) >= 366:
raise ValueError("All days elements should be less than 366" )
SCREAMING_SNAKE_CASE : Dict = set(lowercase )
@functools.cache
def dynamic_programming(lowercase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
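# Quick check of the dynamic program above. Upstream this obfuscated top-level function
# is the classic minimum-cost-tickets problem, exposed as mincost_tickets(days, costs);
# assuming that name: travelling on days {1, 4, 6, 7, 8, 20} with pass costs [2, 7, 25]
# (1-day, 7-day, 30-day) costs 11 -- a 1-day pass on day 1, a 7-day pass covering
# days 4-10, and a 1-day pass on day 20.
print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 25]))  # 11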
| 319
| 0
|
"""simple docstring"""
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
def merge(lowercase , lowercase ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(lowercase ) <= 1:
return collection
SCREAMING_SNAKE_CASE : Optional[int] = len(lowercase ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case = input("""Enter numbers separated by a comma:\n""").strip()
snake_case = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
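# Sanity check of the top-down merge sort above (the recursive call shows the function
# is named merge_sort). The two-way merge is stable because ties are taken from the
# left run first:
print(merge_sort([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]
print(merge_sort([]))               # []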
| 352
|
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
    # Round the floating-point cube root: 27 ** (1 / 3) is 3.0000000000000004, so comparing
    # the raw value would wrongly report False for true cubes.
    SCREAMING_SNAKE_CASE : Dict = round(n ** (1 / 3))
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
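# A float-free alternative sketch: integer binary search for the cube root avoids the
# inexact ** (1 / 3) entirely (this helper name is ours, not part of the file above):
def perfect_cube_binary_search(n: int) -> bool:
    if n < 0:
        n = -n  # a negative integer is a perfect cube iff its absolute value is
    left, right = 0, n
    while left <= right:
        mid = (left + right) // 2
        if mid ** 3 == n:
            return True
        if mid ** 3 < n:
            left = mid + 1
        else:
            right = mid - 1
    return False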
| 319
| 0
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( lowercase , lowercase=False ):
"""simple docstring"""
try:
SCREAMING_SNAKE_CASE : List[str] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
SCREAMING_SNAKE_CASE : int = default
else:
# KEY is set, convert it to True or False.
try:
SCREAMING_SNAKE_CASE : Optional[Any] = strtobool(lowerCamelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''' )
return _value
snake_case = parse_flag_from_env("""RUN_SLOW""", default=False)
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skip("Test was skipped" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , "test is slow" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase=None , lowercase=None ):
"""simple docstring"""
if test_case is None:
return partial(lowerCamelCase__ , version=lowerCamelCase__ )
return unittest.skipUnless(is_torch_version(">=" , lowerCamelCase__ ) , F'''test requires torch version >= {version}''' )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(lowerCamelCase__ )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(lowerCamelCase__ )
snake_case = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = True
@classmethod
def _A ( cls : int ):
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
@classmethod
def _A ( cls : int ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _A ( self : Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCAmelCase_ )
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : List[str] ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Tuple , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : int = mocks if isinstance(UpperCAmelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = AcceleratorState()
SCREAMING_SNAKE_CASE : List[str] = tensor[None].clone().to(state.device )
SCREAMING_SNAKE_CASE : Optional[int] = gather(lowerCamelCase__ ).cpu()
SCREAMING_SNAKE_CASE : Tuple = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCamelCase__ ):
return False
return True
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : List[str] = returncode
SCREAMING_SNAKE_CASE : Dict = stdout
SCREAMING_SNAKE_CASE : Union[str, Any] = stderr
async def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
while True:
SCREAMING_SNAKE_CASE : List[Any] = await stream.readline()
if line:
callback(lowerCamelCase__ )
else:
break
async def lowerCamelCase__ ( lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=False , lowercase=False ):
"""simple docstring"""
if echo:
print("\nRunning: " , " ".join(lowerCamelCase__ ) )
SCREAMING_SNAKE_CASE : str = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : str = []
def tee(lowercase , lowercase , lowercase , lowercase="" ):
SCREAMING_SNAKE_CASE : Optional[int] = line.decode("utf-8" ).rstrip()
sink.append(lowerCamelCase__ )
if not quiet:
print(lowerCamelCase__ , lowerCamelCase__ , file=lowerCamelCase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda lowercase : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stdout , label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda lowercase : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stderr , label="stderr:" ) ) ),
] , timeout=lowerCamelCase__ , )
return _RunOutput(await p.wait() , lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase__ ( lowercase , lowercase=None , lowercase=None , lowercase=180 , lowercase=False , lowercase=True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = asyncio.get_event_loop()
SCREAMING_SNAKE_CASE : Union[str, Any] = loop.run_until_complete(
_stream_subprocess(lowerCamelCase__ , env=lowerCamelCase__ , stdin=lowerCamelCase__ , timeout=lowerCamelCase__ , quiet=lowerCamelCase__ , echo=lowerCamelCase__ ) )
SCREAMING_SNAKE_CASE : List[str] = " ".join(lowerCamelCase__ )
if result.returncode > 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = "\n".join(result.stderr )
raise RuntimeError(
F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
F'''The combined stderr from workers follows:\n{stderr}''' )
return result
class SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
pass
def lowerCamelCase__ ( lowercase , lowercase=False ):
"""simple docstring"""
try:
SCREAMING_SNAKE_CASE : Union[str, Any] = subprocess.check_output(lowerCamelCase__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCamelCase__ , "decode" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F'''Command `{' '.join(lowerCamelCase__ )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
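# Hypothetical usage of the helpers above. Upstream, the first function is exported as
# parse_flag_from_env and the skip decorators as slow, require_cuda, require_multi_gpu,
# etc.; assuming those names:
import os

os.environ["RUN_SLOW"] = "yes"
print(parse_flag_from_env("RUN_SLOW", default=False))  # 1 -- strtobool maps yes/true/1 to 1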
| 353
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
snake_case = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
snake_case = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" )
return sd
def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = OrderedDict()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE : Optional[Any] = key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] )
SCREAMING_SNAKE_CASE : Union[str, Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
        # Old bert code didn't have `decoder.bias`; it was added later, so initialize it from `cls.predictions.bias`
SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = "pretraining"
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512}
SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048}
SCREAMING_SNAKE_CASE : Any = "vqa_advanced"
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129}
SCREAMING_SNAKE_CASE : Tuple = "vqa"
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr"
SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase )
# Load State Dict
SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase )
model.load_state_dict(lowercase )
# Save Checkpoints
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
snake_case = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
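# Tiny illustration of the prefix-renaming pass in get_new_dict above: rename pairs are
# applied in order and detector weights are dropped (toy values stand in for tensors):
from collections import OrderedDict

rename_pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
old_state_dict = {"bert.bert.embeddings.weight": 1, "detector.backbone.w": 2}
new_state_dict = OrderedDict()
for key, tensor in old_state_dict.items():
    if "detector" in key:  # detector weights are skipped, exactly as above
        continue
    for src, dst in rename_pairs:
        key = key.replace(src, dst)
    new_state_dict[key] = tensor
print(new_state_dict)  # OrderedDict([('visual_bert.embeddings.weight', 1)])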
| 319
| 0
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]=13 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Any=24 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : List[str]=37 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str=10 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[Any]=2 , ):
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = patch_size
SCREAMING_SNAKE_CASE : Tuple = max_length
SCREAMING_SNAKE_CASE : Optional[Any] = num_mel_bins
SCREAMING_SNAKE_CASE : str = is_training
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = type_sequence_label_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = scope
SCREAMING_SNAKE_CASE : int = frequency_stride
SCREAMING_SNAKE_CASE : Optional[Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE : int = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
SCREAMING_SNAKE_CASE : str = (self.max_length - self.patch_size) // self.time_stride + 1
SCREAMING_SNAKE_CASE : Tuple = frequency_out_dimension * time_out_dimension
SCREAMING_SNAKE_CASE : Any = num_patches + 2
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
SCREAMING_SNAKE_CASE : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, input_values, labels
def _A ( self : List[str] ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _A ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Dict = ASTModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE : str = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {"input_values": input_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Union[str, Any] = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
UpperCamelCase_ : int = False
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : Union[str, Any] = False
def _A ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : int = ASTModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _A ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def _A ( self : Optional[Any] ):
pass
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _A ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ["input_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@slow
def _A ( self : Optional[Any] ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = ASTModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = torchaudio.load(_lowerCAmelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _A ( self : Tuple ):
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : str = self.default_feature_extractor
SCREAMING_SNAKE_CASE : str = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE : Dict = self.default_feature_extractor
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = prepare_audio()
SCREAMING_SNAKE_CASE : Dict = audio.squeeze().numpy()
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(_lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**_lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
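# Worked example of the sequence-length arithmetic the tester above relies on
# (its defaults: num_mel_bins=16, max_length=24, patch_size=2, both strides=2):
num_mel_bins, max_length, patch_size, stride = 16, 24, 2, 2
frequency_out_dimension = (num_mel_bins - patch_size) // stride + 1  # 8
time_out_dimension = (max_length - patch_size) // stride + 1         # 12
seq_length = frequency_out_dimension * time_out_dimension + 2        # 96 patches + [CLS] + distillation = 98
print(seq_length)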
| 354
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = '''ClapFeatureExtractor'''
UpperCamelCase_ : Any = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("sampling_rate" , UpperCAmelCase_ )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if audios is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extractor(
UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None and audios is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def _A ( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
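# Hypothetical usage of the processor above. The checkpoint id is the commonly used
# LAION one (downloading it needs network access), and CLAP expects 48 kHz audio:
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
inputs = processor(
    text=["a dog barking"],
    audios=[np.zeros(48_000, dtype=np.float32)],  # one second of silence
    sampling_rate=48_000,
    return_tensors="pt",
)
print(inputs.keys())  # input_ids, attention_mask, input_features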
| 319
| 0
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Any = [10, 20, 30, 40, 50, 60]
SCREAMING_SNAKE_CASE : int = [2, 4, 6, 8, 10, 12]
SCREAMING_SNAKE_CASE : Any = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def _A ( self : Optional[int] ):
        self.assertRaisesRegex(ValueError , "max_weight must greater than zero." )
    def _A ( self : Any ):
        self.assertRaisesRegex(ValueError , "Weight can not be negative." )
    def _A ( self : Optional[int] ):
        self.assertRaisesRegex(ValueError , "Profit can not be negative." )
    def _A ( self : Union[str, Any] ):
        self.assertRaisesRegex(ValueError , "max_weight must greater than zero." )
    def _A ( self : List[Any] ):
        self.assertRaisesRegex(
            ValueError , "The length of profit and weight must be same." )
if __name__ == "__main__":
unittest.main()
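# Minimal sketch of the greedy calc_profit under test, reconstructed from the error
# messages asserted above (fractional knapsack by profit/weight ratio; the signature
# calc_profit(profit, weight, max_weight) is assumed from the first test):
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # Take items in decreasing profit/weight ratio, fractionally for the last one.
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    total, capacity = 0.0, max_weight
    for i in order:
        if weight[i] <= capacity:
            capacity -= weight[i]
            total += profit[i]
        else:
            total += profit[i] * capacity / weight[i]
            break
    return total


# First test above: all ratios equal 5 and total weight 42 <= 100, so everything fits.
print(calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100))  # 210.0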
| 355
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : Optional[int] = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path
elif issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path]
SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache"
SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
for split in splits:
SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : str = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE : Any = {split: parquet_path}
else:
SCREAMING_SNAKE_CASE : Tuple = "train"
SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path}
SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache"
SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" )
SCREAMING_SNAKE_CASE : List[Any] = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]}
SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} )
SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase )
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert get_writer_batch_size(lowercase ) == expected
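# Minimal round-trip with the reader/writer under test (path is illustrative; write()
# returns the number of bytes written, which the tests above assert is positive):
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
assert ParquetDatasetWriter(ds, "/tmp/example.parquet").write() > 0
reloaded = ParquetDatasetReader("/tmp/example.parquet").read()
assert reloaded.column_names == ["col_1", "col_2"]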
| 319
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case = 16
snake_case = 32
def lowerCamelCase__ ( lowercase , lowercase = 16 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : Dict = datasets.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : List[str] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE : Optional[Any] = 8
else:
SCREAMING_SNAKE_CASE : List[str] = None
return tokenizer.pad(
lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
SCREAMING_SNAKE_CASE : List[Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
snake_case = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1":
SCREAMING_SNAKE_CASE : Dict = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
SCREAMING_SNAKE_CASE : str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    SCREAMING_SNAKE_CASE : int = config["lr"]
SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["seed"] )
SCREAMING_SNAKE_CASE : Tuple = int(config["batch_size"] )
set_seed(lowercase )
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = get_dataloaders(lowercase , lowercase )
SCREAMING_SNAKE_CASE : List[str] = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE : List[str] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE : Any = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE : Any = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : int = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : Optional[Any] = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE : List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE : int = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
SCREAMING_SNAKE_CASE : List[Any] = os.path.split(lowercase )[-1].split("." )[0]
accelerator.init_trackers(lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
SCREAMING_SNAKE_CASE : str = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE : Tuple = model(**lowercase )
SCREAMING_SNAKE_CASE : Any = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
SCREAMING_SNAKE_CASE : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
            # Step on accumulation boundaries; 1-based counting makes every optimizer step
            # see exactly `gradient_accumulation_steps` minibatches.
            if (step + 1) % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE : Any = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowercase , references=lowercase , )
SCREAMING_SNAKE_CASE : Dict = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowercase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowercase ),
"epoch": epoch,
} , step=lowercase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowercase , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
SCREAMING_SNAKE_CASE : str = parser.parse_args()
    SCREAMING_SNAKE_CASE : List[Any] = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
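# Worked example of the batch-splitting logic above: a requested batch size of 64 on GPU
# becomes 4 accumulation steps of MAX_GPU_BATCH_SIZE = 16, so each optimizer step still
# sees an effective batch of 64 samples:
MAX_GPU_BATCH_SIZE = 16
batch_size, gradient_accumulation_steps = 64, 1
if batch_size > MAX_GPU_BATCH_SIZE:
    gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
    batch_size = MAX_GPU_BATCH_SIZE
print(gradient_accumulation_steps, batch_size)  # 4 16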
| 356
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
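# With the _LazyModule pattern above, importing the package stays cheap: the config class
# is always registered, while the torch-backed model classes are only added to the import
# structure when torch is available and are resolved on first attribute access:
from transformers import FocalNetConfig

config = FocalNetConfig()  # lightweight; does not require torch at import time
# `from transformers import FocalNetModel` triggers the deferred torch-dependent import.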
| 319
| 0
|
from __future__ import annotations
from math import pi
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
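# Example usage. Upstream this function is named ind_reactance(inductance, frequency,
# reactance), with 0 passed for the single unknown; assuming that name, a 10 mH inductor
# at 1 kHz has X_L = 2 * pi * f * L ~ 62.83 ohm:
print(ind_reactance(0.01, 1000, 0))               # {'reactance': 62.83185307179586}
print(ind_reactance(0, 1000, 62.83185307179586))  # {'inductance': 0.01} (up to float rounding)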
| 357
|
def lowerCamelCase__ ( lowercase , lowercase = 0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = length or len(lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = list_data[i + 1], list_data[i]
SCREAMING_SNAKE_CASE : str = True
return list_data if not swapped else bubble_sort(lowercase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
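# Sanity check of the recursive bubble sort above (the recursive call shows it is named
# bubble_sort). Each pass bubbles the largest remaining element to the end, so at most
# n - 1 passes recurse:
print(bubble_sort([0, 5, 2, 3, 2]))  # [0, 2, 2, 3, 5]
print(bubble_sort([]))               # []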
| 319
| 0
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCAmelCase = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
for attribute in key.split("." ):
SCREAMING_SNAKE_CASE : int = getattr(A__ , A__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE : Dict = getattr(A__ , A__ ).shape
else:
SCREAMING_SNAKE_CASE : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : Tuple = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : Optional[Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : Any = value
else:
SCREAMING_SNAKE_CASE : Any = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
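# Example invocation (illustrative paths; the script name assumes this file is the
# WavLM converter shipped with transformers):
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base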
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """
    A list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] objects that are applied in sequence to a `scores`
    input tensor.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that performs top-p filtering, i.e. restricting to the smallest set of most probable tokens whose cumulative probability reaches `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that performs top-k filtering, i.e. restricting to the k highest probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a minimum length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts."""

    def __init__(self, begin_suppress_tokens, begin_index: int):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces specific token ids at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that modifies the logits for the generation of timestamps in Whisper decoding."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),  # has to be non-timestamp
                    scores_k.at[: self.eos_token_id].set(-float("inf")),  # cannot be a normal text token
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False,
        )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf")), scores)

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
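# Minimal illustrative usage (not part of the original module; the ids and scores
# are made up): chain a temperature warper with top-k filtering the way Flax
# `generate` would apply them.
if __name__ == "__main__":
    processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)])
    dummy_input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
    dummy_scores = jnp.array([[0.1, 0.4, 0.2, 0.3]])
    # All scores outside the top 2 are pushed to -inf after warping.
    print(processors(dummy_input_ids, dummy_scores, cur_len=4))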
def solution(n: int = 10) -> str:
    """
    Return the last `n` digits of 28433 * 2^7830457 + 1 (Project Euler problem 97),
    using modular exponentiation so the huge power is never materialized.

    >>> solution()
    '8739992577'
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the test module corresponding to `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Collect all model tester classes defined in a model test module."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Collect all test classes in a model test module."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect all model classes covered by the test classes in a model test module."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class of a test class, or `None` if it has none."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Collect the test classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Collect the model tester classes of the test classes that cover `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the model tester classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make class objects JSON-friendly by replacing them with their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
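# Illustrative usage (a real test file path in the transformers repo):
#   get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
# maps e.g. BertModel to its tester classes; wrap the result with `to_json` to get
# readable class names instead of class objects.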
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
def abbr(a: str, b: str) -> bool:
    """
    Check whether string `a` can be turned into string `b` by capitalizing some of
    its lowercase letters and deleting all remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
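# Per-variant hyperparameters mirroring the Keras EfficientNet family's compound
# scaling; `dw_padding` appears to list the block indices whose depthwise
# convolution needs explicit padding when ported to PyTorch.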
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
    for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # Classification head; the TF-side key names assume the Keras model's "predictions" layer.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak the original model's weights to our EfficientNet structure.
    """
    # Load the original Keras model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
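# Example invocation (illustrative; the script name assumes this file is the
# EfficientNet converter shipped with transformers):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model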
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, then return its main type name. Otherwise return the type with no change."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """
    Factory function to get a Formatter given its type name and keyword arguments.
    If the formatter for a given type name doesn't exist or is not available, an error is raised.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
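# Illustrative usage: get_formatter("np") resolves the "np" alias and returns a
# NumpyFormatter instance, while get_formatter("pt") raises the registered
# unavailability error when PyTorch is not installed.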
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000 x 1000 grid whose rows and columns decrease monotonically."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """
    Find the index of the first negative number in a decreasingly sorted array.

    >>> find_negative_index([4, 3, 2, -1])
    3
    >>> find_negative_index([1, 0, -1, -4])
    2
    >>> find_negative_index([])
    0
    """
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """
    Count negatives using binary search to find the positive/negative boundary per row.

    >>> count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])
    8
    """
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """
    Count negatives by iterating over every element of the grid.

    >>> count_negatives_brute_force([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])
    8
    """
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """
    Like the brute force above, but break out of a row at the first negative number,
    since every number after it in that row must also be negative.

    >>> count_negatives_brute_force_with_break([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])
    8
    """
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Benchmark the three implementations against each other on the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE + "\n", )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", re.sub("Bert", "TestModel", REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", f"{long_class_name}LMPredictionHead", re.sub("Bert", long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", REFERENCE_CODE, overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE), )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
        localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
        link_unchanged_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
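# With the fine-tuned DialoGPT pickles (small_ft.pkl, medium_ft.pkl, large_ft.pkl)
# in --dialogpt_path, this loop writes a weights file (WEIGHTS_NAME, i.e.
# pytorch_model.bin) into ./DialoGPT-small, ./DialoGPT-medium and ./DialoGPT-large.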
class MaxFenwickTree:
    """
    Fenwick-tree-like structure supporting point updates and range-maximum queries
    over the half-open interval [left, right). Class and method names are
    reconstructed from usage.

    >>> ft = MaxFenwickTree(5)
    >>> ft.update(0, 4)
    >>> ft.update(4, 2)
    >>> ft.query(0, 5)
    4
    >>> ft.update(2, 7)
    >>> ft.query(1, 4)
    7
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # Recompute the maximum over [current_left_border, index); the exact
                # recomputation rule is an assumption where the original was ambiguous.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
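# At import time only `_import_structure` is populated; `_LazyModule` defers the
# actual `tokenization_mluke` import until the attribute is first accessed.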
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="""image classification"""
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="""vision-to-text modeling""")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
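# Usage sketch (not part of the mapping file above; the checkpoint name is only
# illustrative): the auto classes resolve a checkpoint's config type through the
# lazy mappings, so a BERT config dispatches to FlaxBertModel.
# from transformers import FlaxAutoModel
# model = FlaxAutoModel.from_pretrained("bert-base-cased")  # loads a FlaxBertModel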
| 365
|
def or_gate(input_1: int, input_2: int) -> int:
    """Return the OR of the two bit inputs."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 319
| 0
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return pi to `precision` significant digits via the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(F"""The first {n} digits of pi is: {pi(n)}""")
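# Worked example (assuming the reconstruction above): each Chudnovsky term
# contributes roughly 14 digits, so pi(10) needs ceil(10 / 14) = 1 iteration
# (the loop body never runs) and returns "3.14159265".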
| 366
|
class DisjointSet:
    """Union-find over sets with given initial sizes, tracking the largest set size."""

    def __init__(self, set_counts: list):
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int):
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int):
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
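# Illustrative usage (a sketch; `merge` is an assumed name for the obfuscated
# union method above, while `get_parent` is taken from its internal calls):
# ds = DisjointSet([1, 2, 3])
# ds.merge(0, 1)                        # -> True, sets 0 and 1 are joined
# ds.get_parent(0) == ds.get_parent(1)  # -> True
# ds.max_set                            # -> 3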
| 319
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase_ : int = """deit"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : List[Any]=3072 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Tuple=1E-12 , UpperCAmelCase_ : List[Any]=224 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=16 , **UpperCAmelCase_ : Any , ):
        super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : int = patch_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = qkv_bias
SCREAMING_SNAKE_CASE : Optional[int] = encoder_stride
class SCREAMING_SNAKE_CASE ( OnnxConfig ):
'''simple docstring'''
UpperCamelCase_ : Dict = version.parse('''1.11''' )
@property
    def inputs( self : Any ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
    def atol_for_validation( self : int ):
return 1E-4
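# Minimal sketch (assumption: the two placeholder classes above correspond to
# DeiTConfig and DeiTOnnxConfig in transformers):
# from transformers import DeiTConfig
# from transformers.models.deit.configuration_deit import DeiTOnnxConfig
# onnx_config = DeiTOnnxConfig(DeiTConfig())
# list(onnx_config.inputs)         # -> ["pixel_values"], with the dynamic axes declared above
# onnx_config.atol_for_validation  # -> 1e-4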
| 367
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase_ : Dict = '''timm_backbone'''
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] , ):
super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = backbone
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = features_only
SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : List[Any] = out_indices if out_indices is not None else (-1,)
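# Illustrative instantiation (a sketch; the placeholder class above corresponds
# to TimmBackboneConfig, and "resnet50" is just an example timm identifier):
# config = TimmBackboneConfig(backbone="resnet50", num_channels=3, features_only=True)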
| 319
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = StableDiffusionLDMaDPipeline
UpperCamelCase_ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self : List[str] ):
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str]=0 ):
if str(__lowerCamelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_ddim( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionLDMaDPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = ldmad_pipe.to(__lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = ldmad_pipe(**__lowerCamelCase )
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
        expected_slice_depth = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
    def test_stable_diffusion_prompt_embeds( self : str ):
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : int = StableDiffusionLDMaDPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = ldmad_pipe.to(__lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE : int = 3 * [inputs["""prompt"""]]
# forward
SCREAMING_SNAKE_CASE : Dict = ldmad_pipe(**__lowerCamelCase )
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop("prompt" )]
SCREAMING_SNAKE_CASE : List[Any] = ldmad_pipe.tokenizer(
__lowerCamelCase , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__lowerCamelCase , return_tensors="pt" , )
SCREAMING_SNAKE_CASE : List[str] = text_inputs["""input_ids"""].to(__lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = ldmad_pipe.text_encoder(__lowerCamelCase )[0]
        inputs["prompt_embeds"] = prompt_embeds
# forward
SCREAMING_SNAKE_CASE : str = ldmad_pipe(**__lowerCamelCase )
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten() ).max() < 1E-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten() ).max() < 1E-4
    def test_stable_diffusion_negative_prompt( self : List[Any] ):
SCREAMING_SNAKE_CASE : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionLDMaDPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = ldmad_pipe.to(__lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE : str = """french fries"""
SCREAMING_SNAKE_CASE : Optional[int] = ldmad_pipe(**__lowerCamelCase , negative_prompt=__lowerCamelCase )
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
        expected_slice_depth = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple="cpu" , UpperCAmelCase_ : str=torch.floataa , UpperCAmelCase_ : Tuple=0 ):
SCREAMING_SNAKE_CASE : str = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE : str = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
SCREAMING_SNAKE_CASE : Any = ldmad_pipe.to(__lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = ldmad_pipe(**__lowerCamelCase )
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
        expected_slice_depth = np.array(
            [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict="cpu" , UpperCAmelCase_ : int=torch.float32 , UpperCAmelCase_ : Union[str, Any]=0 ):
SCREAMING_SNAKE_CASE : str = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _A ( self : str ):
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(__lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE : int = self.get_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = ldmad_pipe(**__lowerCamelCase )
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495_586
        expected_rgb_std = 0.33_795_515
        expected_depth_mean = 112.48_518
        expected_depth_std = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(__lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = ldmad_pipe(**__lowerCamelCase )
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4_194_127
        expected_rgb_std = 0.35_375_586
        expected_depth_mean = 0.5_638_502
        expected_depth_std = 0.34_686_103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 368
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers below n."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
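# Worked example for the functions above: sum_of_divisors(220) is
# 1+2+4+5+10+11+20+22+44+55+110 = 284 and sum_of_divisors(284) = 220, so the
# amicable pair (220, 284) contributes 220 + 284 = 504 to solution(10000).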
| 319
| 0
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class SCREAMING_SNAKE_CASE ( TestCasePlus ):
'''simple docstring'''
@require_torch
    def test_offline_mode( self : Any ):
SCREAMING_SNAKE_CASE : int = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
SCREAMING_SNAKE_CASE : Union[str, Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
SCREAMING_SNAKE_CASE : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE : Tuple = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(lowercase_ )
BertModel.from_pretrained(lowercase_ )
BertTokenizer.from_pretrained(lowercase_ )
pipeline(task="fill-mask" , model=lowercase_ )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE : Union[str, Any] = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE : Dict = "1"
SCREAMING_SNAKE_CASE : Optional[Any] = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
    def test_offline_mode_no_internet( self : List[str] ):
SCREAMING_SNAKE_CASE : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
SCREAMING_SNAKE_CASE : List[str] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
SCREAMING_SNAKE_CASE : List[str] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE : str = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(lowercase_ )
BertModel.from_pretrained(lowercase_ )
BertTokenizer.from_pretrained(lowercase_ )
pipeline(task="fill-mask" , model=lowercase_ )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE : List[Any] = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_env()
SCREAMING_SNAKE_CASE : Dict = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint( self : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
SCREAMING_SNAKE_CASE : List[str] = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
SCREAMING_SNAKE_CASE : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE : Dict = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE : str = self.get_env()
SCREAMING_SNAKE_CASE : Optional[Any] = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
SCREAMING_SNAKE_CASE : int = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE : List[str] = "1"
SCREAMING_SNAKE_CASE : Any = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "\nfrom transformers import pipeline\n "
SCREAMING_SNAKE_CASE : List[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
SCREAMING_SNAKE_CASE : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
SCREAMING_SNAKE_CASE : int = self.get_env()
SCREAMING_SNAKE_CASE : Any = "1"
SCREAMING_SNAKE_CASE : Union[str, Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
SCREAMING_SNAKE_CASE : Optional[int] = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
    def test_offline_model_dynamic_model( self : List[str] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "\nfrom transformers import AutoModel\n "
SCREAMING_SNAKE_CASE : int = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE : str = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE : Dict = self.get_env()
SCREAMING_SNAKE_CASE : Optional[int] = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE : Tuple = "1"
SCREAMING_SNAKE_CASE : Optional[int] = subprocess.run(lowercase_ , env=lowercase_ , check=lowercase_ , capture_output=lowercase_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 369
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
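# Sketch of what the lazy registration above provides (illustrative): importing
# the package module is cheap, and the torch-backed submodule is only imported
# when an attribute such as EncodecModel is first accessed via _LazyModule.
# from transformers.models import encodec   # fast: modeling code not imported yet
# model_cls = encodec.EncodecModel           # triggers the real import lazily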
| 319
| 0
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : str=7 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : Dict=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Dict=None , ):
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Tuple = seq_length
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Any = use_input_mask
SCREAMING_SNAKE_CASE : str = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_multiple_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout
SCREAMING_SNAKE_CASE : List[Any] = attention_dropout
SCREAMING_SNAKE_CASE : List[str] = weight_tying
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : str = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : str = num_labels
SCREAMING_SNAKE_CASE : Any = num_choices
SCREAMING_SNAKE_CASE : str = scope
    def prepare_config_and_inputs( self : Union[str, Any] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
return config, input_ids, input_mask, token_labels
    def get_config( self : List[Any] ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self : Dict ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
return config, input_ids, input_mask, token_labels
    def create_and_check_model( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[int] = GPTNeoXJapaneseModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : int = GPTNeoXJapaneseModel(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : Optional[int] = GPTNeoXJapaneseForCausalLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Optional[Any] = GPTNeoXJapaneseForCausalLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE : str = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_no_past["hidden_states"][0]
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , )["hidden_states"][0]
# select random slice
SCREAMING_SNAKE_CASE : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : int = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
UpperCamelCase_ : Union[str, Any] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
UpperCamelCase_ : List[str] = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
UpperCamelCase_ : str = False
UpperCamelCase_ : List[str] = False
UpperCamelCase_ : Dict = False
UpperCamelCase_ : str = False
    def setUp( self : int ):
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTNeoXJapaneseModelTester(self )
SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
    def test_config( self : Any ):
self.config_tester.run_common_tests()
    def test_model( self : str ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    def test_model_as_decoder( self : Tuple ):
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    def test_model_as_decoder_with_default_input_mask( self : List[str] ):
# This regression test was failing with PyTorch < 1.3
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
SCREAMING_SNAKE_CASE : Optional[int] = None
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    def test_decoder_model_past_large_inputs( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    def test_model_for_causal_lm( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase_ )
@slow
    def test_generation( self : List[Any] ):
SCREAMING_SNAKE_CASE : Any = "abeja/gpt-neox-japanese-2.7b"
SCREAMING_SNAKE_CASE : Dict = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
SCREAMING_SNAKE_CASE : List[Any] = [
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
SCREAMING_SNAKE_CASE : int = GPTNeoXJapaneseTokenizer.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = []
for prompt in prompts:
SCREAMING_SNAKE_CASE : List[str] = tokenizer(UpperCAmelCase_ , return_tensors="pt" ).input_ids
SCREAMING_SNAKE_CASE : List[str] = model.generate(UpperCAmelCase_ , max_length=50 )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
| 370
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
snake_case = None
snake_case = logging.get_logger(__name__)
snake_case = """▁"""
snake_case = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
snake_case = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
snake_case = {
"""google/pegasus-xsum""": 512,
}
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizerFast ):
'''simple docstring'''
UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES
UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = PegasusTokenizer
UpperCamelCase_ : str = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ):
SCREAMING_SNAKE_CASE : Optional[Any] = offset
if additional_special_tokens is not None:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise TypeError(
f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is'''
f''' {type(UpperCAmelCase_ )}''' )
            additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 )
]
if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask( self : Optional[Any] , UpperCAmelCase_ : Tuple ):
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(UpperCAmelCase_ )
elif token_ids_a is None:
return self._special_token_mask(UpperCAmelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
    def build_inputs_with_special_tokens( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
    def save_vocabulary( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
        out_vocab_file = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
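# Worked example of the offset scheme above (a sketch with the default
# offset=103): pad, eos and the sentence mask occupy the first ids, and
# <unk_2> ... <unk_102> fill the remaining slots so that exactly `offset` ids
# precede the underlying SentencePiece vocabulary, mirroring the slow
# PegasusTokenizer.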
| 319
| 0
|
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (an adjacency dict) contains a cycle."""
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursive DFS that reports a back edge (cycle) reachable from `vertex`."""
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
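# Illustrative inputs for the cycle check above (a sketch):
# check_cycle({0: [1], 1: [2], 2: []})   # -> False (a DAG)
# check_cycle({0: [1], 1: [2], 2: [0]})  # -> True (back edge 2 -> 0)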
| 371
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
snake_case = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 319
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 350
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case = 16
snake_case = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval dataloaders for GLUE MRPC with a BERT tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
snake_case = mocked_dataloaders # noqa: F811
def training_function(config, args):
    """Train and evaluate BERT on MRPC, accumulating gradients across minibatches."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
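# Hedged usage note (not part of the original script): once `accelerate` is
# configured, a typical launch looks like
#   accelerate launch <this_script>.py --gradient_accumulation_steps 2
# where the script file name is a placeholder for the actual file on disk.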
| 319
| 0
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : str = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase_ : Union[str, Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Optional[int] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE : List[Any] = text_generator("This is a test" , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
SCREAMING_SNAKE_CASE : str = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
_snake_case , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
SCREAMING_SNAKE_CASE : List[str] = text_generator("This is a test" , do_sample=_snake_case , num_return_sequences=2 , return_tensors=_snake_case )
self.assertEqual(
_snake_case , [
{"generated_token_ids": ANY(_snake_case )},
{"generated_token_ids": ANY(_snake_case )},
] , )
text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
text_generator.tokenizer.pad_token = "<pad>"
SCREAMING_SNAKE_CASE : Dict = text_generator(
["This is a test", "This is a second test"] , do_sample=_snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=_snake_case , )
self.assertEqual(
_snake_case , [
[
{"generated_token_ids": ANY(_snake_case )},
{"generated_token_ids": ANY(_snake_case )},
],
[
{"generated_token_ids": ANY(_snake_case )},
{"generated_token_ids": ANY(_snake_case )},
],
] , )
@require_tf
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE : Dict = text_generator("This is a test" , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
SCREAMING_SNAKE_CASE : Dict = text_generator(["This is a test", "This is a second test"] , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def _A ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : str = TextGenerationPipeline(model=_snake_case , tokenizer=_snake_case )
return text_generator, ["This is a test", "Another test"]
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Any = "Hello I believe in"
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE : Any = text_generator(_snake_case )
self.assertEqual(
_snake_case , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
SCREAMING_SNAKE_CASE : int = text_generator(_snake_case , stop_sequence=" fe" )
self.assertEqual(_snake_case , [{"generated_text": "Hello I believe in fe"}] )
def _A ( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ):
model = text_generator.model
tokenizer = text_generator.tokenizer
SCREAMING_SNAKE_CASE : List[str] = text_generator("This is a test" )
self.assertEqual(_snake_case , [{"generated_text": ANY(_snake_case )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
SCREAMING_SNAKE_CASE : List[str] = text_generator("This is a test" , return_full_text=_snake_case )
self.assertEqual(_snake_case , [{"generated_text": ANY(_snake_case )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
SCREAMING_SNAKE_CASE : Any = pipeline(task="text-generation" , model=_snake_case , tokenizer=_snake_case , return_full_text=_snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = text_generator("This is a test" )
self.assertEqual(_snake_case , [{"generated_text": ANY(_snake_case )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
SCREAMING_SNAKE_CASE : Dict = text_generator("This is a test" , return_full_text=_snake_case )
self.assertEqual(_snake_case , [{"generated_text": ANY(_snake_case )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[{"generated_text": ANY(_snake_case )}, {"generated_text": ANY(_snake_case )}],
[{"generated_text": ANY(_snake_case )}, {"generated_text": ANY(_snake_case )}],
] , )
if text_generator.tokenizer.pad_token is not None:
SCREAMING_SNAKE_CASE : List[str] = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[{"generated_text": ANY(_snake_case )}, {"generated_text": ANY(_snake_case )}],
[{"generated_text": ANY(_snake_case )}, {"generated_text": ANY(_snake_case )}],
] , )
with self.assertRaises(_snake_case ):
SCREAMING_SNAKE_CASE : List[str] = text_generator("test" , return_full_text=_snake_case , return_text=_snake_case )
with self.assertRaises(_snake_case ):
SCREAMING_SNAKE_CASE : List[Any] = text_generator("test" , return_full_text=_snake_case , return_tensors=_snake_case )
with self.assertRaises(_snake_case ):
SCREAMING_SNAKE_CASE : int = text_generator("test" , return_text=_snake_case , return_tensors=_snake_case )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
SCREAMING_SNAKE_CASE : Dict = text_generator("" )
self.assertEqual(_snake_case , [{"generated_text": ANY(_snake_case )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
SCREAMING_SNAKE_CASE : Optional[Any] = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
SCREAMING_SNAKE_CASE : List[Any] = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
SCREAMING_SNAKE_CASE : List[str] = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_snake_case ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _A ( self : Any ):
import torch
# Classic `model_kwargs`
SCREAMING_SNAKE_CASE : Any = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe("This is a test" )
self.assertEqual(
_snake_case , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
SCREAMING_SNAKE_CASE : str = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
SCREAMING_SNAKE_CASE : Any = pipe("This is a test" )
self.assertEqual(
_snake_case , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe("This is a test" )
self.assertEqual(
_snake_case , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def _A ( self : str ):
import torch
SCREAMING_SNAKE_CASE : Any = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.float16 )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def _A ( self : Any ):
import torch
SCREAMING_SNAKE_CASE : List[Any] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.float16 )
pipe("This is a test" , do_sample=_snake_case , top_p=0.5 )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Tuple = "Hello world"
SCREAMING_SNAKE_CASE : str = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger("transformers.generation.tf_utils" )
else:
SCREAMING_SNAKE_CASE : str = logging.get_logger("transformers.generation.utils" )
SCREAMING_SNAKE_CASE : Dict = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_snake_case ) as cl:
SCREAMING_SNAKE_CASE : str = text_generator(_snake_case , max_length=10 , max_new_tokens=1 )
self.assertIn(_snake_case , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_snake_case ) as cl:
SCREAMING_SNAKE_CASE : Any = text_generator(_snake_case , max_new_tokens=1 )
self.assertNotIn(_snake_case , cl.out )
with CaptureLogger(_snake_case ) as cl:
SCREAMING_SNAKE_CASE : List[str] = text_generator(_snake_case , max_length=10 )
self.assertNotIn(_snake_case , cl.out )
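# Hedged illustration (not part of the test suite): the minimal pipeline call
# these tests exercise. The tiny checkpoint is the same one used above and
# downloads on first use.
# from transformers import pipeline
# generator = pipeline("text-generation", model="sshleifer/tiny-ctrl", framework="pt")
# print(generator("This is a test", do_sample=False))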
| 351
|
import functools
def mincost_tickets(days: list, costs: list) -> int:
    """Minimum cost to cover every travel day with 1-, 7- and 30-day passes."""
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
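if __name__ == "__main__":
    # Illustrative call (the classic example for this problem): the cheapest
    # combination of passes for these days costs 11.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))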
| 319
| 0
|
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f'''\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 352
|
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube (non-negative integer input assumed)."""
    # Round the cube root before cubing back: a pure floating-point comparison
    # fails for values like 27, where 27 ** (1 / 3) == 3.0000000000000004.
    val = round(n ** (1 / 3))
    return val**3 == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
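if __name__ == "__main__":
    # Additional illustrative checks:
    assert perfect_cube(343)  # 7 ** 3
    assert not perfect_cube(9)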
| 319
| 0
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = "laion/clap-htsat-unfused"
SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp()
def _A ( self : Tuple , **UpperCAmelCase_ : List[str] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def _A ( self : List[Any] , **UpperCAmelCase_ : Optional[Any] ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def _A ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Dict = self.get_feature_extractor()
SCREAMING_SNAKE_CASE : Optional[Any] = ClapProcessor(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Union[str, Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase_ )
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : List[str] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE : Tuple = self.get_feature_extractor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
SCREAMING_SNAKE_CASE : str = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase_ )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Optional[int] = self.get_feature_extractor()
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = ClapProcessor(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = floats_list((3, 1000) )
SCREAMING_SNAKE_CASE : Tuple = feature_extractor(UpperCAmelCase_ , return_tensors="np" )
SCREAMING_SNAKE_CASE : str = processor(audios=UpperCAmelCase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Any = self.get_feature_extractor()
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = ClapProcessor(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = "This is a test string"
SCREAMING_SNAKE_CASE : str = processor(text=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : str = self.get_feature_extractor()
SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE : List[str] = processor.batch_decode(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Any = self.get_feature_extractor()
SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = ClapProcessor(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
| 353
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    """Load a checkpoint's state dict onto CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Rename the original keys to the transformers VisualBERT naming scheme."""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but it was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak a VisualBERT checkpoint into the transformers format."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
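# Hedged usage sketch (not part of the original script; the script file name
# and output directory are illustrative, and the checkpoint must be one of
# ACCEPTABLE_CHECKPOINTS):
#   python convert_visual_bert_checkpoint.py nlvr2_fine_tuned.th ./visual_bert_nlvr2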
| 319
| 0
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def _A ( *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ):
pass
@is_pipeline_test
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Tuple = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE : List[Any] = image_classifier(UpperCAmelCase_ , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase_ ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
SCREAMING_SNAKE_CASE : str = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
],
] , )
@require_tf
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Dict = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
SCREAMING_SNAKE_CASE : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE : Dict = image_classifier(UpperCAmelCase_ , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
SCREAMING_SNAKE_CASE : int = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
{"score": 0.333, "label": ANY(UpperCAmelCase_ )},
],
] , )
@slow
@require_torch
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : str = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_classifier(UpperCAmelCase_ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
SCREAMING_SNAKE_CASE : int = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE : List[Any] = image_classifier(UpperCAmelCase_ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
SCREAMING_SNAKE_CASE : Tuple = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
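# Hedged illustration (not part of the test suite): the call pattern the slow
# tests above exercise; the CLIP checkpoint downloads on first use and the
# image path is a placeholder.
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("path/to/image.png", candidate_labels=["cat", "plane", "remote"])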
| 354
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
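# Hedged usage sketch (not part of the original module; the checkpoint name is
# taken from the tests elsewhere in this repo, and `raw_audio` plus the
# sampling rate are placeholder assumptions):
# from transformers import ClapProcessor
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=raw_audio, sampling_rate=48_000, return_tensors="pt")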
| 319
| 0
|
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int):
    """Simulate a quantum half adder and return the measurement counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
| 355
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : Optional[int] = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path
elif issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path]
SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache"
SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
for split in splits:
SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : str = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE : Any = {split: parquet_path}
else:
SCREAMING_SNAKE_CASE : Tuple = "train"
SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path}
SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache"
SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" )
SCREAMING_SNAKE_CASE : List[Any] = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]}
SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} )
SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase )
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert get_writer_batch_size(lowercase ) == expected
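# Hedged round-trip sketch (not part of the test suite): write a Dataset to
# Parquet and load it back with the public helpers exercised above.
def _parquet_roundtrip_demo(tmp_dir: str) -> None:
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    ds.to_parquet(f"{tmp_dir}/demo.parquet")
    reloaded = Dataset.from_parquet(f"{tmp_dir}/demo.parquet")
    assert reloaded.column_names == ["col_1", "col_2"]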
| 319
| 0
|
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR gate: returns 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the truth table of the NOR gate."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |      {nor_gate(0, 0)} |")
    print(f"|       0 |       1 |      {nor_gate(0, 1)} |")
    print(f"|       1 |       0 |      {nor_gate(1, 0)} |")
    print(f"|       1 |       1 |      {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 356
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 319
| 0
|
def multiply(a: int, b: int) -> int:
    """Multiply two non-negative integers by shift-and-add (Russian peasant method)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def multiply_mod(a: int, b: int, c: int) -> int:
    """Compute (a * b) % c using modular additions to keep intermediates small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
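if __name__ == "__main__":
    # Illustrative sanity checks:
    assert multiply(15, 19) == 15 * 19
    assert multiply_mod(15, 19, 7) == (15 * 19) % 7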
| 357
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: one pass bubbles the max to the end, then recurse on the prefix."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
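if __name__ == "__main__":
    # Illustrative call:
    print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]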
| 319
| 0
|
from math import pow, sqrt
def validate(*values: float) -> bool:
    """Return True if at least one value is given and all values are positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    """Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)."""
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    """Solve for the first effusion rate given the second rate and both molar masses."""
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    """Solve for the second effusion rate given the first rate and both molar masses."""
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    """Solve for the first molar mass given the second molar mass and both rates."""
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    """Solve for the second molar mass given the first molar mass and both rates."""
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
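if __name__ == "__main__":
    # Illustrative check with hydrogen (2.016 g/mol) and helium (4.002 g/mol);
    # Graham's law gives rate_H2 / rate_He = sqrt(4.002 / 2.016) ~= 1.408943.
    print(effusion_ratio(2.016, 4.002))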
| 358
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
snake_case = get_logger(__name__)
snake_case = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : str , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Optional[int] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ):
for processor in self:
SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(processor.__call__ ).parameters
if len(UpperCAmelCase_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
SCREAMING_SNAKE_CASE : int = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : Dict = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : float ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
SCREAMING_SNAKE_CASE : Optional[int] = temperature
def __call__( self : List[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Dict = scores / self.temperature
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , UpperCAmelCase_ : float , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
SCREAMING_SNAKE_CASE : Optional[int] = top_p
SCREAMING_SNAKE_CASE : str = filter_value
SCREAMING_SNAKE_CASE : List[str] = min_tokens_to_keep
def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = lax.top_k(UpperCAmelCase_ , scores.shape[-1] )
SCREAMING_SNAKE_CASE : str = jnp.full_like(UpperCAmelCase_ , self.filter_value )
SCREAMING_SNAKE_CASE : Optional[int] = jax.nn.softmax(UpperCAmelCase_ , axis=-1 ).cumsum(axis=-1 )
SCREAMING_SNAKE_CASE : Tuple = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
SCREAMING_SNAKE_CASE : Optional[int] = jnp.roll(UpperCAmelCase_ , 1 )
score_mask |= score_mask.at[:, 0].set(UpperCAmelCase_ )
# min tokens to keep
SCREAMING_SNAKE_CASE : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = jnp.where(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = jax.lax.sort_key_val(UpperCAmelCase_ , UpperCAmelCase_ )[-1]
return next_scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
SCREAMING_SNAKE_CASE : List[str] = max(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = filter_value
def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = scores.shape
SCREAMING_SNAKE_CASE : List[str] = jnp.full(batch_size * vocab_size , self.filter_value )
SCREAMING_SNAKE_CASE : List[str] = min(self.top_k , scores.shape[-1] ) # Safety check
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = lax.top_k(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.broadcast_to((jnp.arange(UpperCAmelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
SCREAMING_SNAKE_CASE : List[str] = topk_scores.flatten()
SCREAMING_SNAKE_CASE : List[Any] = topk_indices.flatten() + shift
SCREAMING_SNAKE_CASE : Dict = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = next_scores_flat.reshape(UpperCAmelCase_ , UpperCAmelCase_ )
return next_scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : List[str] = bos_token_id
def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Dict = jnp.full(scores.shape , -float("inf" ) )
SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.bool_(cur_len - 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase_ )
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Optional[Any] = max_length
SCREAMING_SNAKE_CASE : Tuple = eos_token_id
def __call__( self : List[str] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : List[str] = jnp.full(scores.shape , -float("inf" ) )
SCREAMING_SNAKE_CASE : str = 1 - jnp.bool_(cur_len - self.max_length + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase_ )
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
SCREAMING_SNAKE_CASE : List[str] = min_length
SCREAMING_SNAKE_CASE : Tuple = eos_token_id
def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
# create boolean flag to decide if min length penalty should be applied
SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(UpperCAmelCase_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase_ )
return scores
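# --- Standalone sketch (not part of the original file): the min-length rule is
# a 0/1 flag derived from the current length. Illustrative names and values.
import jax.numpy as jnp

min_length, eos_token_id, cur_len = 5, 2, 3
scores = jnp.zeros((1, 4))
apply_penalty = 1 - jnp.clip(cur_len - min_length, 0, 1)  # 1 while cur_len <= min_length
scores = jnp.where(apply_penalty, scores.at[:, eos_token_id].set(-jnp.inf), scores)
# EOS stays masked until enough tokens have been generated.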
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Optional[Any] = list(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = begin_index
def __call__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index )
SCREAMING_SNAKE_CASE : List[str] = jnp.where(UpperCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase_ )
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : list ):
SCREAMING_SNAKE_CASE : List[Any] = list(UpperCAmelCase_ )
def __call__( self : Any , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : List[Any] = dict(UpperCAmelCase_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
SCREAMING_SNAKE_CASE : Any = force_token_array.at[index].set(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = jnp.intaa(UpperCAmelCase_ )
def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
def _force_token(UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : List[str] = scores.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.force_token_array[generation_idx]
SCREAMING_SNAKE_CASE : Tuple = jnp.ones_like(UpperCAmelCase_ , dtype=scores.dtype ) * -float("inf" )
SCREAMING_SNAKE_CASE : Dict = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
SCREAMING_SNAKE_CASE : Optional[Any] = lax.dynamic_update_slice(UpperCAmelCase_ , UpperCAmelCase_ , (0, current_token) )
return new_scores
SCREAMING_SNAKE_CASE : Any = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCAmelCase_ ) , lambda: scores , ) , )
return scores
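# --- Standalone sketch (not part of the original file): the nested `lax.cond`
# control flow above, with zero-argument lambdas deciding whether a token is
# forced at the current step. Illustrative names only.
import jax.numpy as jnp
from jax import lax

def pick(cur_len, scores, forced):
    # forced[i] >= 0 means "token forced at step i"; -1 means nothing is forced
    return lax.cond(
        cur_len >= forced.shape[0],
        lambda: scores,  # past the forced region: leave scores untouched
        lambda: lax.cond(
            forced[cur_len] >= 0,
            lambda: jnp.full_like(scores, -jnp.inf).at[:, forced[cur_len]].set(0.0),
            lambda: scores,
        ),
    )

out = pick(0, jnp.zeros((1, 4)), jnp.array([2, -1]))  # forces token 2 at step 0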
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.eos_token_id
SCREAMING_SNAKE_CASE : Tuple = generate_config.no_timestamps_token_id
SCREAMING_SNAKE_CASE : List[Any] = generate_config.no_timestamps_token_id + 1
SCREAMING_SNAKE_CASE : Dict = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(UpperCAmelCase_ , "max_initial_timestamp_index" ):
SCREAMING_SNAKE_CASE : List[Any] = generate_config.max_initial_timestamp_index
else:
SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size
def __call__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ):
# suppress <|notimestamps|> which is handled by without_timestamps
SCREAMING_SNAKE_CASE : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCAmelCase_ , UpperCAmelCase_ , )
return jnp.where(
UpperCAmelCase_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(cur_len == self.begin_index , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = self.timestamp_begin + self.max_initial_timestamp_index
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(
UpperCAmelCase_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCAmelCase_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
SCREAMING_SNAKE_CASE : List[Any] = jax.nn.log_softmax(UpperCAmelCase_ , axis=-1 )
def handle_cumulative_probs(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ )
return scores
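# --- Standalone sketch (not part of the original file): the final rule above
# in isolation. If the total log-probability of all timestamp tokens exceeds
# the best text token, the text tokens are masked. Illustrative values only.
import jax
import jax.numpy as jnp

timestamp_begin = 3
logits = jnp.array([0.0, 0.1, 0.2, 1.0, 1.1])  # ids >= 3 are timestamp tokens
logprobs = jax.nn.log_softmax(logits)
timestamp_logprob = jax.nn.logsumexp(logprobs[timestamp_begin:])
max_text_token_logprob = jnp.max(logprobs[:timestamp_begin])
forced = jnp.where(
    timestamp_logprob > max_text_token_logprob,
    logits.at[:timestamp_begin].set(-jnp.inf),
    logits,
)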
| 319
| 0
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
@require_torch
def _A ( self : Dict ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCamelCase__ : List[str] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
UpperCamelCase__ : Union[str, Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
UpperCamelCase__ : Any = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn\'t access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
UpperCamelCase__ : str = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__A )
BertModel.from_pretrained(__A )
BertTokenizer.from_pretrained(__A )
pipeline(task="fill-mask" , model=__A )
# baseline - just load from_pretrained with normal network
UpperCamelCase__ : str = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
UpperCamelCase__ : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase__ : Optional[Any] = "1"
UpperCamelCase__ : Optional[int] = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _A ( self : Dict ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCamelCase__ : Optional[int] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
UpperCamelCase__ : Optional[int] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
UpperCamelCase__ : Optional[int] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
UpperCamelCase__ : int = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__A )
BertModel.from_pretrained(__A )
BertTokenizer.from_pretrained(__A )
pipeline(task="fill-mask" , model=__A )
# baseline - just load from_pretrained with normal network
UpperCamelCase__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
UpperCamelCase__ : List[Any] = self.get_env()
UpperCamelCase__ : Union[str, Any] = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _A ( self : Any ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCamelCase__ : Dict = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
UpperCamelCase__ : Union[str, Any] = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
UpperCamelCase__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
UpperCamelCase__ : str = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
UpperCamelCase__ : int = self.get_env()
UpperCamelCase__ : Dict = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
UpperCamelCase__ : str = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase__ : Optional[Any] = "1"
UpperCamelCase__ : Union[str, Any] = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _A ( self : Optional[int] ):
UpperCamelCase__ : Optional[Any] = "\nfrom transformers import pipeline\n "
UpperCamelCase__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
UpperCamelCase__ : str = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
UpperCamelCase__ : List[str] = self.get_env()
UpperCamelCase__ : Union[str, Any] = "1"
UpperCamelCase__ : str = [sys.executable, "-c", "\n".join([load, mock, run] )]
UpperCamelCase__ : Optional[Any] = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def _A ( self : Optional[int] ):
UpperCamelCase__ : int = "\nfrom transformers import AutoModel\n "
UpperCamelCase__ : Tuple = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
UpperCamelCase__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
UpperCamelCase__ : List[str] = self.get_env()
UpperCamelCase__ : Optional[int] = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase__ : int = "1"
UpperCamelCase__ : int = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 359
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
snake_case = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 319
| 0
|
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
snake_case = "pytorch_model.bin"
snake_case = "pytorch_model.bin.index.json"
snake_case = "adapter_config.json"
snake_case = "adapter_model.bin"
snake_case = "adapter_model.safetensors"
snake_case = "tf_model.h5"
snake_case = "tf_model.h5.index.json"
snake_case = "model.ckpt"
snake_case = "flax_model.msgpack"
snake_case = "flax_model.msgpack.index.json"
snake_case = "model.safetensors"
snake_case = "model.safetensors.index.json"
snake_case = "config.json"
snake_case = "preprocessor_config.json"
snake_case = FEATURE_EXTRACTOR_NAME
snake_case = "generation_config.json"
snake_case = "modelcard.json"
snake_case = "▁"
snake_case = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
snake_case = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
snake_case = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
snake_case = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
    if version.parse(__version__ ) < version.parse(lowercase ):
        if "dev" in lowercase:
            error_message = (
                """This example requires a source install from HuggingFace Transformers (see """
                """`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
            )
        else:
            error_message = F'''This example requires a minimum version of {lowercase},'''
        error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
"versions of HuggingFace Transformers." )
| 360
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 319
| 0
|
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = 1
@register_to_config
def __init__( self : int , UpperCAmelCase_ : str=2000 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=20 , UpperCAmelCase_ : List[Any]=1E-3 ):
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = None
def _A ( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] = None ):
SCREAMING_SNAKE_CASE : Tuple = torch.linspace(1 , self.config.sampling_eps , lowerCamelCase_ , device=lowerCamelCase_ )
def _A ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple=None ):
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
SCREAMING_SNAKE_CASE : str = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
SCREAMING_SNAKE_CASE : Dict = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
SCREAMING_SNAKE_CASE : Dict = std.flatten()
while len(std.shape ) < len(score.shape ):
SCREAMING_SNAKE_CASE : int = std.unsqueeze(-1 )
SCREAMING_SNAKE_CASE : Optional[Any] = -score / std
# compute
SCREAMING_SNAKE_CASE : Tuple = -1.0 / len(self.timesteps )
SCREAMING_SNAKE_CASE : Any = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
SCREAMING_SNAKE_CASE : str = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
SCREAMING_SNAKE_CASE : Optional[int] = beta_t.unsqueeze(-1 )
SCREAMING_SNAKE_CASE : Dict = -0.5 * beta_t * x
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.sqrt(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = drift - diffusion**2 * score
SCREAMING_SNAKE_CASE : Union[str, Any] = x + drift * dt
# add noise
SCREAMING_SNAKE_CASE : Optional[int] = randn_tensor(x.shape , layout=x.layout , generator=lowerCamelCase_ , device=x.device , dtype=x.dtype )
SCREAMING_SNAKE_CASE : List[str] = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Dict ):
return self.config.num_train_timesteps
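# --- Standalone sketch (not part of the original file): one Euler-Maruyama
# step of the reverse SDE as computed above, with made-up scalar coefficients
# and a stand-in for the learned score model.
import math
import torch

x = torch.randn(2, 3)
score = -x                    # stand-in score estimate
dt = -1.0 / 1000              # negative: integrating backwards in time
beta_t = 0.5
drift = -0.5 * beta_t * x
diffusion = math.sqrt(beta_t)
drift = drift - diffusion**2 * score
x_mean = x + drift * dt
x = x_mean + diffusion * math.sqrt(-dt) * torch.randn_like(x)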
| 361
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
snake_case = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
snake_case = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = EfficientNetConfig()
SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"]
SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"]
SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"]
SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"]
SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"]
SCREAMING_SNAKE_CASE : str = "huggingface/label-files"
SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE : str = 1000
SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE : Tuple = {int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : int = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase , )
return preprocessor
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = len(lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )}
SCREAMING_SNAKE_CASE : Dict = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
SCREAMING_SNAKE_CASE : int = {}
for item in rename_keys:
if item[0] in original_param_names:
SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1]
SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight"
SCREAMING_SNAKE_CASE : List[str] = "classifier.bias"
return key_mapping
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
SCREAMING_SNAKE_CASE : str = key_mapping[key]
if "_conv" in key and "kernel" in key:
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) )
else:
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase )
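# --- Standalone sketch (not part of the original file): why the permutes above
# exist. TF stores conv kernels as (H, W, in, out) and depthwise kernels as
# (H, W, in, multiplier); PyTorch expects (out, in, H, W). Illustrative shapes.
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32))              # HWIO
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (32, 16, 3, 3)          # OIHW

tf_dw_kernel = np.zeros((3, 3, 16, 1))            # depthwise kernel
pt_dw_kernel = torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1)
assert pt_dw_kernel.shape == (16, 1, 3, 3)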
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name](
include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , )
SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables
SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables
SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
SCREAMING_SNAKE_CASE : Tuple = param.numpy()
SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() )
# Load HuggingFace model
SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase )
SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval()
SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase )
replace_params(lowercase , lowercase , lowercase )
# Initialize preprocessor and preprocess input image
SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase )
SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy()
# Original model inference
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase )
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 )
SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase ):
os.mkdir(lowercase )
# Save converted model and image processor
hf_model.save_pretrained(lowercase )
preprocessor.save_pretrained(lowercase )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowercase )
hf_model.push_to_hub(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
snake_case = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 319
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = ["""image_processor""", """tokenizer"""]
UpperCamelCase_ : Union[str, Any] = """CLIPImageProcessor"""
UpperCamelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Tuple , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : int=None , **UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , snake_case__ , )
SCREAMING_SNAKE_CASE : int = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(snake_case__ , snake_case__ )
def __call__( self : Tuple , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : Union[str, Any] ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
SCREAMING_SNAKE_CASE : Tuple = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def _A ( self : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ):
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _A ( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ):
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _A ( self : Dict ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , snake_case__ , )
return self.image_processor_class
@property
def _A ( self : Optional[int] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , snake_case__ , )
return self.image_processor
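# --- Standalone sketch (not part of the original file): typical usage of such
# a combined processor. Assumes network access and the public
# "openai/clip-vit-base-patch32" checkpoint; purely illustrative.
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# `inputs` merges input_ids/attention_mask from the tokenizer with
# pixel_values from the image processor into a single BatchEncoding.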
| 362
|
def lowerCamelCase__ ( ):
"""simple docstring"""
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
snake_case = generate_large_matrix()
snake_case = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
assert all(row == sorted(lowercase , reverse=lowercase ) for row in grid )
assert all(list(lowercase ) == sorted(lowercase , reverse=lowercase ) for col in zip(*lowercase ) )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
SCREAMING_SNAKE_CASE : List[Any] = (left + right) // 2
SCREAMING_SNAKE_CASE : Optional[int] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
SCREAMING_SNAKE_CASE : List[Any] = mid + 1
else:
SCREAMING_SNAKE_CASE : Dict = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : List[str] = len(grid[0] )
for i in range(len(lowercase ) ):
SCREAMING_SNAKE_CASE : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase ) * len(grid[0] )) - total
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = 0
for row in grid:
for i, number in enumerate(lowercase ):
if number < 0:
total += len(lowercase ) - i
break
return total
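# --- Standalone sketch (not part of the original file): a worked example of
# the per-row idea on one non-increasing row.
row = [7, 3, 0, -1, -5]
# the first negative sits at index 3, so this row contributes len(row) - 3 == 2
first_negative = next((i for i, n in enumerate(row) if n < 0), len(row))
assert first_negative == 3 and len(row) - first_negative == 2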
def lowerCamelCase__ ( ):
"""simple docstring"""
from timeit import timeit
print("Running benchmarks" )
SCREAMING_SNAKE_CASE : List[str] = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
SCREAMING_SNAKE_CASE : Union[str, Any] = timeit(F'''{func}(grid=grid)''' , setup=lowercase , number=500 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 319
| 0
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class SCREAMING_SNAKE_CASE ( a__ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] = FlaxAutoencoderKL
@property
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Dict = 4
SCREAMING_SNAKE_CASE : Tuple = 3
SCREAMING_SNAKE_CASE : Optional[int] = (32, 32)
SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : List[str] = jax.random.uniform(lowerCAmelCase__ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : str = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
SCREAMING_SNAKE_CASE : int = self.dummy_input
return init_dict, inputs_dict
| 363
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
snake_case = ["""small""", """medium""", """large"""]
snake_case = """lm_head.decoder.weight"""
snake_case = """lm_head.weight"""
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = torch.load(lowercase )
SCREAMING_SNAKE_CASE : Any = d.pop(lowercase )
os.makedirs(lowercase , exist_ok=lowercase )
torch.save(lowercase , os.path.join(lowercase , lowercase ) )
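# --- Standalone sketch (not part of the original file): the conversion boils
# down to a single key rename inside the checkpoint dict, e.g.:
state_dict = {"lm_head.decoder.weight": "W", "other.weight": "V"}
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in state_dict and "lm_head.weight" in state_dict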
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
snake_case = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
snake_case = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
snake_case = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 319
| 0
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert("RGB" )
return image
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = dct.pop(lowercase )
SCREAMING_SNAKE_CASE : List[str] = val
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
SCREAMING_SNAKE_CASE : int = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
SCREAMING_SNAKE_CASE : int = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat((q_bias, torch.zeros_like(lowercase , requires_grad=lowercase ), v_bias) )
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
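# --- Standalone sketch (not part of the original file): the checkpoint stores
# only q and v biases (k has none), so the fused qkv bias is q, a zero block
# for k, then v. Toy sizes below.
import torch

hidden = 4
q_bias = torch.ones(hidden)
v_bias = torch.full((hidden,), 2.0)
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
assert qkv_bias.shape == (3 * hidden,)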
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = 364 if "coco" in model_name else 224
SCREAMING_SNAKE_CASE : str = InstructBlipVisionConfig(image_size=lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
SCREAMING_SNAKE_CASE : Union[str, Any] = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
SCREAMING_SNAKE_CASE : Union[str, Any] = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
SCREAMING_SNAKE_CASE : Union[str, Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32001 ).to_dict()
elif "vicuna-13b" in model_name:
SCREAMING_SNAKE_CASE : Tuple = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32001 ).to_dict()
else:
raise ValueError("Model name not supported" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
SCREAMING_SNAKE_CASE : List[str] = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict()
SCREAMING_SNAKE_CASE : Any = InstructBlipConfig(vision_config=lowercase , text_config=lowercase , qformer_config=lowercase )
return config, image_size
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase=None , lowercase=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" )
qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
if "t5" in model_name:
SCREAMING_SNAKE_CASE : Any = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
SCREAMING_SNAKE_CASE : List[Any] = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" )
tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = get_blipa_config(lowercase )
SCREAMING_SNAKE_CASE : List[str] = InstructBlipForConditionalGeneration(lowercase ).eval()
SCREAMING_SNAKE_CASE : int = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
SCREAMING_SNAKE_CASE : Optional[int] = "cuda:1" if torch.cuda.is_available() else "cpu"
SCREAMING_SNAKE_CASE : Any = "cuda:2" if torch.cuda.is_available() else "cpu"
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = load_model_and_preprocess(
name=lowercase , model_type=lowercase , is_eval=lowercase , device=lowercase )
original_model.eval()
print("Done!" )
# update state dict keys
SCREAMING_SNAKE_CASE : Optional[int] = original_model.state_dict()
SCREAMING_SNAKE_CASE : Tuple = create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowercase )
if key.startswith("Qformer.bert" ):
SCREAMING_SNAKE_CASE : str = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
SCREAMING_SNAKE_CASE : Any = key.replace("self" , "attention" )
if "llm_proj" in key:
SCREAMING_SNAKE_CASE : Optional[Any] = key.replace("llm_proj" , "language_projection" )
if "t5_proj" in key:
SCREAMING_SNAKE_CASE : int = key.replace("t5_proj" , "language_projection" )
if key.startswith("llm_model" ):
SCREAMING_SNAKE_CASE : Optional[Any] = key.replace("llm_model" , "language_model" )
if key.startswith("t5" ):
SCREAMING_SNAKE_CASE : List[str] = key.replace("t5" , "language" )
SCREAMING_SNAKE_CASE : Dict = val
# read in qv biases
read_in_q_v_bias(lowercase , lowercase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(lowercase , strict=lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = load_demo_image()
SCREAMING_SNAKE_CASE : Any = "What is unusual about this image?"
# create processor
SCREAMING_SNAKE_CASE : List[str] = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=lowercase , image_std=lowercase )
SCREAMING_SNAKE_CASE : List[str] = InstructBlipProcessor(
image_processor=lowercase , tokenizer=lowercase , qformer_tokenizer=lowercase , )
SCREAMING_SNAKE_CASE : int = processor(images=lowercase , text=lowercase , return_tensors="pt" ).to(lowercase )
# make sure processor creates exact same pixel values
SCREAMING_SNAKE_CASE : Optional[Any] = vis_processors["eval"](lowercase ).unsqueeze(0 ).to(lowercase )
SCREAMING_SNAKE_CASE : int = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , lowercase )
original_model.to(lowercase )
hf_model.to(lowercase )
with torch.no_grad():
if "vicuna" in model_name:
SCREAMING_SNAKE_CASE : List[Any] = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
SCREAMING_SNAKE_CASE : List[Any] = hf_model(**lowercase ).logits
else:
SCREAMING_SNAKE_CASE : Tuple = original_model(
{"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
SCREAMING_SNAKE_CASE : Any = tokenizer("\n" , return_tensors="pt" ).input_ids.to(lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
SCREAMING_SNAKE_CASE : Optional[int] = hf_model(**lowercase , labels=lowercase ).logits
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
SCREAMING_SNAKE_CASE : Union[str, Any] = 1E-4 if "vicuna" in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , lowercase , atol=lowercase )
print("Looks ok!" )
print("Generating with original model..." )
SCREAMING_SNAKE_CASE : int = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("Generating with HF model..." )
SCREAMING_SNAKE_CASE : str = hf_model.generate(
**lowercase , do_sample=lowercase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
SCREAMING_SNAKE_CASE : Dict = 2
print("Original generation:" , lowercase )
SCREAMING_SNAKE_CASE : Dict = processor.batch_decode(lowercase , skip_special_tokens=lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = [text.strip() for text in output_text]
print("HF generation:" , lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if push_to_hub:
processor.push_to_hub(F'''Salesforce/{model_name}''' )
hf_model.push_to_hub(F'''Salesforce/{model_name}''' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
snake_case = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 364
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version of one file, using the pattern registered for that file."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the pinned version in all the example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hardcoded (setup, __init__, examples)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the main docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
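# A minimal usage sketch (assuming this file lives at utils/release.py, which is not
# stated here): bump to the next minor release, cut a patch, or move back to dev:
#
#   python utils/release.py                 # pre-release work (prompts for the version)
#   python utils/release.py --patch         # patch release
#   python utils/release.py --post_release  # switch back to a .dev0 version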
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = (DPMSolverSDEScheduler,)
UpperCamelCase_ : List[str] = 1_0
def _A ( self : List[str] , **UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def _A ( self : Dict ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _A ( self : Optional[Any] ):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _A ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _A ( self : Optional[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : int = self.dummy_model()
SCREAMING_SNAKE_CASE : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Any = sample.to(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Any = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : List[str] = output.prev_sample
SCREAMING_SNAKE_CASE : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : int = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : List[Any] = sample.to(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Any = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : int = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : str = output.prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Tuple = self.dummy_model()
SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Any = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : str = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : List[str] = output.prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_ , use_karras_sigmas=SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE_ ) * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Dict = sample.to(SCREAMING_SNAKE_CASE_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : int = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Tuple = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; return True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
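# A minimal usage sketch for the union-by-rank structure above: three singleton sets
# are merged pairwise, and `max_set` tracks the largest set size seen so far.
if __name__ == "__main__":
    disjoint_set = DisjointSet([1, 1, 1])
    disjoint_set.merge(0, 1)
    disjoint_set.merge(1, 2)
    print(disjoint_set.max_set)  # 3
    print(disjoint_set.get_parent(0) == disjoint_set.get_parent(2))  # True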
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the item located at `_loader_batch_index` within the current `_loader_batch_data`."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator, so start it
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes the accumulator on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    r"""
    Configuration class for a backbone loaded from the timm library.
    """

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
import operator as op


SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

# Distributed types that run on CUDA devices, and the subset also available on XPU devices
CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """
    Return the sum of all the amicable numbers under n.

    >>> solution(10000)
    31626
    """
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    r"""
    Constructs a BridgeTower processor which wraps a Roberta tokenizer and a BridgeTower image processor into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
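# A hedged usage sketch: the processor combines image and text preprocessing in one call.
# The checkpoint name is illustrative, and running this requires downloading weights,
# so the snippet is left commented.
#
#   from PIL import Image
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   image = Image.open("example.jpg")
#   encoding = processor(image, "a photo of a cat", return_tensors="pt")
#   print(encoding.keys())  # input_ids, attention_mask, pixel_values, pixel_mask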
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations


class IIRFilter:
    r"""
    N-order IIR filter, assuming float samples normalized on [-1, 1].
    """

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the filter coefficients; a_0 may be omitted and defaults to 1.0."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n] using the direct form I difference equation."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
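# A minimal usage sketch: a first-order filter driven by hand-picked coefficients
# (the coefficient values are illustrative, not derived from a filter design).
if __name__ == "__main__":
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, -0.5], [0.5, 0.5])
    for x in (1.0, 0.0, 0.0):
        print(filt.process(x))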
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" PEGASUS tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        """Get a list where entries are [1] if a token is special, else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
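# A hedged usage sketch (requires downloading the checkpoint, so it is left commented):
#
#   tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   batch = tokenizer(["PEGASUS is a summarization model."], return_tensors="pt")
#   print(batch["input_ids"].shape)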
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Get the writer_batch_size that defines the maximum row group size in the parquet files.
    A lower value is used for image, audio and binary datasets to optimize random access,
    since accessing one row requires reading its entire row group.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the pyarrow table as Parquet to a binary file handle. The caller is responsible for opening and closing the handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
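# A hedged usage sketch for the reader/writer pair above; the file paths are
# illustrative only, so the snippet is left commented.
#
#   ds = ParquetDatasetReader("data/train.parquet", split="train").read()
#   ParquetDatasetWriter(ds, "data/copy.parquet").write()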
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply a function to iterable elements in parallel, using multiprocessing.Pool or joblib for parallelization."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm cannot accurately be applied to joblib
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Configure the parallel backend to use when mapping with multiple workers, e.g. "spark"."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
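# A hedged usage sketch: route the nested-map parallelism above through joblib's Spark
# backend (requires the optional `joblibspark` package); `ds` and `preprocess` are
# illustrative names, so the snippet is left commented.
#
#   with parallel_backend("spark"):
#       ds = ds.map(preprocess, num_proc=4)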
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a set of `DataLoader`s for the glue MRPC dataset, using "bert-base-cased" as the tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
def solution(n: int = 1000) -> int:
    """
    Return the index of the first term in the Fibonacci sequence to contain n digits.

    >>> solution(3)
    12
    """
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
import functools


def mincost_tickets(days: list, costs: list) -> int:
    """
    Minimum cost to travel on all the given days, using 1-day, 7-day and 30-day passes
    whose prices are given by `costs`.

    >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
    11
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
snake_case = """\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"""
snake_case = """\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"""
snake_case = """\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
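# Illustration of the normalization above: case, punctuation, articles, and extra
# whitespace are all stripped before comparison, so
#   compute_exact("The cat sat.", "a cat sat")  -> 1   (both normalize to "cat sat")
#   compute_em(["The cat sat."], [["a cat sat", "dogs bark"]])  -> 100.0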
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcountergood_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcountergood_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    # Build 1- to 4-gram lists for the source, candidate, and each reference.
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    # Score each n-gram order separately, then average the keep/delete/add scores.
    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase=True, tokenizer="13a", return_str=True):
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
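# Usage note for the tokenizer dispatch above (the first branch assumes
# sacrebleu >= 2 is installed):
#   normalize("About 95 species are currently accepted.")
#   # -> roughly "about 95 species are currently accepted ."  (lowercased, 13a-tokenized)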
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(sources)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
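# The comprehension above transposes per-prediction reference lists into the
# per-reference streams sacrebleu expects, e.g. for two predictions with two
# references each:
#   references             = [["r1a", "r1b"], ["r2a", "r2b"]]
#   transformed_references = [["r1a", "r2a"], ["r1b", "r2b"]]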
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 352
|
def perfect_cube(n: int) -> bool:
    # Round the floating-point cube root: without rounding, exact cubes fail the
    # comparison (27 ** (1 / 3) evaluates to 3.0000000000000004, for example).
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
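# For very large integers even the rounded float cube root can be off; an
# integer-only variant (a sketch, not part of the original file) corrects the
# initial guess and then tests exactly in integer arithmetic:
#
#   def perfect_cube_exact(n: int) -> bool:
#       root = round(abs(n) ** (1 / 3))
#       while root**3 > abs(n):
#           root -= 1
#       while (root + 1) ** 3 <= abs(n):
#           root += 1
#       return root**3 == abs(n)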
| 319
| 0
|
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = create_tensor(lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = gather(lowercase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = [state.process_index]
SCREAMING_SNAKE_CASE : Tuple = gather_object(lowercase )
assert len(lowercase ) == state.num_processes, F'''{gathered_obj}, {len(lowercase )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = create_tensor(lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = broadcast(lowercase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
if state.is_main_process:
SCREAMING_SNAKE_CASE : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.arange(state.num_processes ).to(state.device )
SCREAMING_SNAKE_CASE : List[str] = pad_across_processes(lowercase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE : List[Any] = create_tensor(lowercase )
SCREAMING_SNAKE_CASE : int = reduce(lowercase , "sum" )
SCREAMING_SNAKE_CASE : int = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase , lowercase ), F'''{reduced_tensor} != {truth_tensor}'''
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = create_tensor(lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = reduce(lowercase , "mean" )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase , lowercase ), F'''{reduced_tensor} != {truth_tensor}'''
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
main()
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = PartialState()
state.print(F'''State: {state}''' )
state.print("testing gather" )
test_gather(lowercase )
state.print("testing gather_object" )
test_gather_object(lowercase )
state.print("testing broadcast" )
test_broadcast(lowercase )
state.print("testing pad_across_processes" )
test_pad_across_processes(lowercase )
state.print("testing reduce_sum" )
test_reduce_sum(lowercase )
state.print("testing reduce_mean" )
test_reduce_mean(lowercase )
if __name__ == "__main__":
main()
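# Usage note: this script is meant to be run under the `accelerate` launcher so
# that several processes exist for gather/broadcast/pad/reduce to act across,
# e.g. (the script name is a placeholder):
#
#   accelerate launch --num_processes 2 test_operations.py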
| 353
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
snake_case = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
snake_case = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" )
return sd
def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = OrderedDict()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE : Optional[Any] = key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] )
SCREAMING_SNAKE_CASE : Union[str, Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = "pretraining"
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512}
SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048}
SCREAMING_SNAKE_CASE : Any = "vqa_advanced"
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129}
SCREAMING_SNAKE_CASE : Tuple = "vqa"
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr"
SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase )
# Load State Dict
SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase )
model.load_state_dict(lowercase )
# Save Checkpoints
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
snake_case = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
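# Usage note (the file name below is a placeholder for wherever this script lives):
#
#   python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visual_bert_vqa
#
# The first argument must be one of the checkpoints in ACCEPTABLE_CHECKPOINTS;
# the model type and visual embedding size are inferred from its name.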
| 319
| 0
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
snake_case = "bart"
snake_case = True
@st.cache(allow_output_mutation=lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
SCREAMING_SNAKE_CASE : List[Any] = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
SCREAMING_SNAKE_CASE : int = qar_model.eval()
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = (None, None)
if MODEL_TYPE == "bart":
SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
SCREAMING_SNAKE_CASE : Dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
SCREAMING_SNAKE_CASE : Optional[Any] = sas_model.eval()
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE : Dict = faiss.StandardGpuResources()
SCREAMING_SNAKE_CASE : Optional[Any] = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"]
SCREAMING_SNAKE_CASE : List[str] = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , )
SCREAMING_SNAKE_CASE : Tuple = faiss.IndexFlatIP(128 )
SCREAMING_SNAKE_CASE : int = faiss.index_cpu_to_gpu(lowercase , 1 , lowercase )
wikiaab_gpu_index_flat.add(lowercase ) # TODO fix for larger GPU
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = (None, None)
SCREAMING_SNAKE_CASE : List[str] = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = datasets.load_dataset("eli5" , name="LFQA_reddit" )
SCREAMING_SNAKE_CASE : Tuple = elia["train_eli5"]
SCREAMING_SNAKE_CASE : Tuple = np.memmap(
"eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) )
SCREAMING_SNAKE_CASE : Any = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(lowercase )
return (elia_train, eli5_train_q_index)
snake_case = load_indexes()
snake_case = load_models()
snake_case = load_train_data()
def lowerCamelCase__ ( lowercase , lowercase=10 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_questions_for_retrieval([question] , lowercase , lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = eli5_train_q_index.search(lowercase , lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = [elia_train[int(lowercase )] for i in I[0]]
return nn_examples
def lowerCamelCase__ ( lowercase , lowercase="wiki40b" , lowercase="dense" , lowercase=10 ):
"""simple docstring"""
if source == "none":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = query_qa_dense_index(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = query_es_index(
lowercase , lowercase , index_name="english_wiki40b_snippets_100w" , n_results=lowercase , )
SCREAMING_SNAKE_CASE : Union[str, Any] = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
SCREAMING_SNAKE_CASE : Union[str, Any] = "question: {} context: {}".format(lowercase , lowercase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda lowercase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowercase : None),
} )
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase=64 , lowercase=256 , lowercase=False , lowercase=2 , lowercase=0.95 , lowercase=0.8 ):
"""simple docstring"""
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = qa_sas_generate(
lowercase , lowercase , lowercase , num_answers=1 , num_beams=lowercase , min_len=lowercase , max_len=lowercase , do_sample=lowercase , temp=lowercase , top_p=lowercase , top_k=lowercase , max_input_length=1024 , device="cuda:0" , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
snake_case = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
snake_case = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
snake_case = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
snake_case = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
snake_case = st.sidebar.checkbox("""Demo options""")
if demo_options:
snake_case = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
snake_case = action_list.index(action_st)
snake_case = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
snake_case = show_type == "Show full text of passages"
else:
snake_case = 3
snake_case = True
snake_case = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
snake_case = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
snake_case = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
snake_case = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
snake_case = "wiki40b"
snake_case = "dense"
snake_case = "beam"
snake_case = 2
snake_case = 64
snake_case = 256
snake_case = None
snake_case = None
snake_case = st.sidebar.checkbox("""Generation options""")
if generate_options:
snake_case = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
snake_case = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
snake_case = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
snake_case = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
snake_case = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
snake_case = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
snake_case = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
snake_case = None
# start main text
snake_case = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
snake_case = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
snake_case = st.text_input("""Enter your question here:""", """""")
else:
snake_case = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
snake_case = make_support(question, source=wiki_source, method="""dense""", n_results=10)
snake_case = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
snake_case = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
snake_case = support_list[:10]
snake_case = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
snake_case = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
snake_case = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
snake_case = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(""" """, """_"""))
snake_case = res[1].strip()
if sec_titles == "":
snake_case = "[{}]({})".format(res[0], wiki_url)
else:
snake_case = sec_titles.split(""" & """)
snake_case = " & ".join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
snake_case = find_nearest_training(question)
snake_case = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
snake_case = [
"{}. {}".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
snake_case = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 354
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( ProcessorMixin ):
'''simple docstring'''
UpperCamelCase_ : Dict = '''ClapFeatureExtractor'''
UpperCamelCase_ : Any = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("sampling_rate" , UpperCAmelCase_ )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if audios is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extractor(
UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None and audios is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def _A ( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
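# Usage sketch for the processor above (the checkpoint name is an assumption;
# any CLAP checkpoint with this processor class should behave the same):
#
#   import numpy as np
#   from transformers import ClapProcessor
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(
#       text=["a dog barking"],
#       audios=[np.zeros(48_000, dtype=np.float32)],
#       sampling_rate=48_000,
#       return_tensors="pt",
#   )
#   # `inputs` carries the tokenizer fields plus the feature extractor's
#   # `input_features`, matching the merge performed in __call__ above.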
| 319
| 0
|
"""simple docstring"""
import unittest
from transformers import DonutProcessor
snake_case = """naver-clova-ix/donut-base"""
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Dict = DonutProcessor.from_pretrained(UpperCamelCase_ )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
SCREAMING_SNAKE_CASE : Dict = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
SCREAMING_SNAKE_CASE : int = self.processor.tokenajson(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , UpperCamelCase_ )
| 355
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : Optional[int] = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path
elif issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path]
SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache"
SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
for split in splits:
SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : str = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE : Any = {split: parquet_path}
else:
SCREAMING_SNAKE_CASE : Tuple = "train"
SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path}
SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache"
SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" )
SCREAMING_SNAKE_CASE : List[Any] = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]}
SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} )
SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase )
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert get_writer_batch_size(lowercase ) == expected
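# Round-trip sketch mirroring the tests above ("example.parquet" is a
# placeholder path):
#
#   import datasets
#   ds = datasets.Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   ParquetDatasetWriter(ds, "example.parquet").write()
#   reloaded = ParquetDatasetReader("example.parquet").read()
#   assert reloaded.column_names == ["col_1", "col_2"]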
| 319
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
snake_case = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class SCREAMING_SNAKE_CASE ( PipelineTool ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = '''facebook/nllb-200-distilled-600M'''
UpperCamelCase_ : int = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
'''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
UpperCamelCase_ : Union[str, Any] = '''translator'''
UpperCamelCase_ : int = AutoTokenizer
UpperCamelCase_ : Optional[int] = AutoModelForSeqaSeqLM
UpperCamelCase_ : List[Any] = LANGUAGE_CODES
UpperCamelCase_ : int = ['''text''', '''text''', '''text''']
UpperCamelCase_ : int = ['''text''']
def _A ( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str ):
if src_lang not in self.lang_to_code:
raise ValueError(f'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'''{tgt_lang} is not a supported language.''' )
SCREAMING_SNAKE_CASE : Dict = self.lang_to_code[src_lang]
SCREAMING_SNAKE_CASE : List[Any] = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__lowerCamelCase , return_tensors="pt" , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase )
def _A ( self : int , UpperCAmelCase_ : Dict ):
return self.model.generate(**__lowerCamelCase )
def _A ( self : int , UpperCAmelCase_ : Any ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__lowerCamelCase )
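# Usage sketch: as a PipelineTool the translator (named TranslationTool
# upstream) is driven through its __call__, which chains the encode -> forward
# -> decode methods defined above. Tool-calling APIs vary across transformers
# versions, so treat this as illustrative:
#
#   tool = TranslationTool()
#   tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")
#   # -> "Hello, how are you?"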
| 356
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 319
| 0
|
import torch
from torch import nn
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Union[str, Any]=False ):
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = n_token
SCREAMING_SNAKE_CASE : List[Any] = d_embed
SCREAMING_SNAKE_CASE : str = d_proj
SCREAMING_SNAKE_CASE : Optional[Any] = cutoffs + [n_token]
SCREAMING_SNAKE_CASE : Tuple = [0] + self.cutoffs
SCREAMING_SNAKE_CASE : int = div_val
SCREAMING_SNAKE_CASE : Any = self.cutoffs[0]
SCREAMING_SNAKE_CASE : List[str] = len(self.cutoffs ) - 1
SCREAMING_SNAKE_CASE : Dict = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.zeros(self.n_clusters ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.ModuleList()
SCREAMING_SNAKE_CASE : str = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_A , _A ) ) )
else:
self.out_projs.append(_A )
self.out_layers.append(nn.Linear(_A , _A ) )
else:
for i in range(len(self.cutoffs ) ):
SCREAMING_SNAKE_CASE : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE : List[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_A , _A ) ) )
self.out_layers.append(nn.Linear(_A , r_idx - l_idx ) )
SCREAMING_SNAKE_CASE : int = keep_order
def _A ( self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int ):
if proj is None:
SCREAMING_SNAKE_CASE : List[str] = nn.functional.linear(_A , _A , bias=_A )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
SCREAMING_SNAKE_CASE : List[str] = nn.functional.linear(_A , proj.t().contiguous() )
SCREAMING_SNAKE_CASE : int = nn.functional.linear(_A , _A , bias=_A )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _A ( self : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Optional[Any]=False ):
if labels is not None:
# Shift so that tokens < n predict n
SCREAMING_SNAKE_CASE : int = hidden[..., :-1, :].contiguous()
SCREAMING_SNAKE_CASE : Union[str, Any] = labels[..., 1:].contiguous()
SCREAMING_SNAKE_CASE : Tuple = hidden.view(-1 , hidden.size(-1 ) )
SCREAMING_SNAKE_CASE : Optional[int] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE : Optional[Any] = self._compute_logit(_A , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
SCREAMING_SNAKE_CASE : Any = labels != -100
SCREAMING_SNAKE_CASE : Any = torch.zeros_like(_A , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE : str = (
-nn.functional.log_softmax(_A , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
SCREAMING_SNAKE_CASE : Optional[Any] = nn.functional.log_softmax(_A , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE : Any = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE : str = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE : str = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE : List[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE : int = self.out_layers[i].weight
SCREAMING_SNAKE_CASE : List[Any] = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE : List[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_A )
biases.append(_A )
SCREAMING_SNAKE_CASE : Tuple = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE : Optional[int] = self._compute_logit(_A , _A , _A , _A )
SCREAMING_SNAKE_CASE : Tuple = nn.functional.log_softmax(_A , dim=1 )
if labels is None:
SCREAMING_SNAKE_CASE : int = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
SCREAMING_SNAKE_CASE : int = torch.zeros_like(_A , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : List[Any] = [0] + self.cutoffs
for i in range(len(_A ) - 1 ):
SCREAMING_SNAKE_CASE : Any = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
SCREAMING_SNAKE_CASE : Tuple = (labels >= l_idx) & (labels < r_idx)
SCREAMING_SNAKE_CASE : Any = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
SCREAMING_SNAKE_CASE : List[Any] = labels.index_select(0 , _A ) - l_idx
SCREAMING_SNAKE_CASE : Optional[Any] = head_logprob.index_select(0 , _A )
SCREAMING_SNAKE_CASE : int = hidden.index_select(0 , _A )
else:
SCREAMING_SNAKE_CASE : List[Any] = hidden
if i == 0:
if labels is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE : List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE : Any = self._compute_logit(_A , _A , _A , _A )
SCREAMING_SNAKE_CASE : List[Any] = nn.functional.log_softmax(_A , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
SCREAMING_SNAKE_CASE : Any = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
SCREAMING_SNAKE_CASE : List[Any] = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , _A , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _A ( self : Optional[Any] , UpperCAmelCase_ : Dict ):
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE : Dict = self._compute_logit(_A , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_A , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE : str = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE : int = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE : Any = self.out_layers[i].weight
SCREAMING_SNAKE_CASE : List[Any] = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE : int = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE : List[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_A )
biases.append(_A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self._compute_logit(_A , _A , _A , _A )
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
SCREAMING_SNAKE_CASE : int = nn.functional.log_softmax(_A , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = [0] + self.cutoffs
for i in range(len(_A ) - 1 ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
SCREAMING_SNAKE_CASE : List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE : Optional[int] = self._compute_logit(_A , _A , _A , _A )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.functional.log_softmax(_A , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = head_logprob[:, -i] + tail_logprob_i
SCREAMING_SNAKE_CASE : str = logprob_i
return out
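# Hedged illustration (separate from the class above): how an adaptive
# softmax derives its cluster boundaries from a cutoff list. The helper name
# and the numbers are hypothetical; only the slicing mirrors the cutoff_ends
# indexing used in the loops above.
def adaptive_clusters(n_token, cutoffs):
    """Return (left, right) vocabulary slices for the head and each tail cluster."""
    ends = [0] + list(cutoffs) + [n_token]
    return [(ends[i], ends[i + 1]) for i in range(len(ends) - 1)]
# adaptive_clusters(10000, [2000, 6000])
# -> [(0, 2000), (2000, 6000), (6000, 10000)]
# The head covers the most frequent tokens and is scored on every call; a
# tail cluster is scored only when the head assigns mass to it, which is
# what the gather over head_logprob implements above.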
|
def lowerCamelCase__ ( lowercase , lowercase = 0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = length or len(lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = list_data[i + 1], list_data[i]
SCREAMING_SNAKE_CASE : str = True
return list_data if not swapped else bubble_sort(lowercase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
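# Hedged usage note for the recursive bubble sort above (placeholder names
# assumed to resolve to bubble_sort(list_data, length=0)):
#   bubble_sort([0, 5, 2, 3, 2]) -> [0, 2, 2, 3, 5]
# Each pass bubbles the largest remaining element toward the end; recursion
# stops early once a full pass makes no swap.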
|
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = (boundary[1] - boundary[0]) / steps
SCREAMING_SNAKE_CASE : Dict = boundary[0]
SCREAMING_SNAKE_CASE : List[str] = boundary[1]
SCREAMING_SNAKE_CASE : int = make_points(_A , _A , _A )
SCREAMING_SNAKE_CASE : Any = 0.0
y += (h / 2.0) * f(_A )
for i in x_i:
# print(i)
y += h * f(_A )
y += (h / 2.0) * f(_A )
return y
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = a + h
while x < (b - h):
yield x
SCREAMING_SNAKE_CASE : int = x + h
def lowerCamelCase__ ( lowercase ): # enter your function here
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (x - 0) * (x - 0)
return y
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = 0.0 # Lower bound of integration
SCREAMING_SNAKE_CASE : List[str] = 1.0 # Upper bound of integration
SCREAMING_SNAKE_CASE : Optional[int] = 10.0 # define number of steps or resolution
SCREAMING_SNAKE_CASE : Union[str, Any] = [a, b] # define boundary of integration
SCREAMING_SNAKE_CASE : List[str] = method_a(_A , _A )
print(F'''y = {y}''' )
if __name__ == "__main__":
main()
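# Hedged reference sketch of the composite trapezoidal rule implemented by
# the obfuscated method above: integral ≈ h * (f(a)/2 + Σ f(x_i) + f(b)/2).
# Function and variable names here are illustrative.
def trapezoid(f, a, b, steps):
    h = (b - a) / steps
    total = 0.5 * (f(a) + f(b))  # endpoint terms carry half weight
    for k in range(1, steps):
        total += f(a + k * h)  # interior sample points
    return h * total
# trapezoid(lambda x: x * x, 0.0, 1.0, 10) -> 0.335 (exact value is 1/3;
# the rule overestimates on convex integrands).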
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
snake_case = get_logger(__name__)
snake_case = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : str , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Optional[int] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ):
for processor in self:
SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(processor.__call__ ).parameters
if len(UpperCAmelCase_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
SCREAMING_SNAKE_CASE : int = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : Dict = processor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : float ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
SCREAMING_SNAKE_CASE : Optional[int] = temperature
def __call__( self : List[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Dict = scores / self.temperature
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , UpperCAmelCase_ : float , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
SCREAMING_SNAKE_CASE : Optional[int] = top_p
SCREAMING_SNAKE_CASE : str = filter_value
SCREAMING_SNAKE_CASE : List[str] = min_tokens_to_keep
def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = lax.top_k(UpperCAmelCase_ , scores.shape[-1] )
SCREAMING_SNAKE_CASE : str = jnp.full_like(UpperCAmelCase_ , self.filter_value )
SCREAMING_SNAKE_CASE : Optional[int] = jax.nn.softmax(UpperCAmelCase_ , axis=-1 ).cumsum(axis=-1 )
SCREAMING_SNAKE_CASE : Tuple = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
SCREAMING_SNAKE_CASE : Optional[int] = jnp.roll(UpperCAmelCase_ , 1 )
score_mask |= score_mask.at[:, 0].set(UpperCAmelCase_ )
# min tokens to keep
SCREAMING_SNAKE_CASE : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = jnp.where(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = jax.lax.sort_key_val(UpperCAmelCase_ , UpperCAmelCase_ )[-1]
return next_scores
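# Hedged numeric walk-through of the nucleus (top-p) filter above: with
# sorted probabilities [0.5, 0.3, 0.15, 0.05] and top_p=0.8, the cumulative
# mass is [0.5, 0.8, 0.95, 1.0]; the strict `< top_p` test marks only the
# first entry, and rolling the mask by one then forcing position 0 keeps the
# first two tokens (mass 0.8) while the rest are set to filter_value.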
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : float = -float("Inf" ) , UpperCAmelCase_ : int = 1 ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
SCREAMING_SNAKE_CASE : List[str] = max(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = filter_value
def __call__( self : Dict , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = scores.shape
SCREAMING_SNAKE_CASE : List[str] = jnp.full(batch_size * vocab_size , self.filter_value )
SCREAMING_SNAKE_CASE : List[str] = min(self.top_k , scores.shape[-1] ) # Safety check
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = lax.top_k(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.broadcast_to((jnp.arange(UpperCAmelCase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
SCREAMING_SNAKE_CASE : List[str] = topk_scores.flatten()
SCREAMING_SNAKE_CASE : List[Any] = topk_indices.flatten() + shift
SCREAMING_SNAKE_CASE : Dict = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = next_scores_flat.reshape(UpperCAmelCase_ , UpperCAmelCase_ )
return next_scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : List[str] = bos_token_id
def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Dict = jnp.full(scores.shape , -float("inf" ) )
SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.bool_(cur_len - 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase_ )
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Optional[Any] = max_length
SCREAMING_SNAKE_CASE : Tuple = eos_token_id
def __call__( self : List[str] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : List[str] = jnp.full(scores.shape , -float("inf" ) )
SCREAMING_SNAKE_CASE : str = 1 - jnp.bool_(cur_len - self.max_length + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase_ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase_ )
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
SCREAMING_SNAKE_CASE : List[str] = min_length
SCREAMING_SNAKE_CASE : Tuple = eos_token_id
def __call__( self : Optional[Any] , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
# create boolean flag to decide if min length penalty should be applied
SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(UpperCAmelCase_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase_ )
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Optional[Any] = list(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = begin_index
def __call__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index )
SCREAMING_SNAKE_CASE : List[str] = jnp.where(UpperCAmelCase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase_ )
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : list ):
SCREAMING_SNAKE_CASE : List[Any] = list(UpperCAmelCase_ )
def __call__( self : Any , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : List[Any] = dict(UpperCAmelCase_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
SCREAMING_SNAKE_CASE : Any = force_token_array.at[index].set(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = jnp.intaa(UpperCAmelCase_ )
def __call__( self : Tuple , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : int ):
def _force_token(UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : List[str] = scores.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.force_token_array[generation_idx]
SCREAMING_SNAKE_CASE : Tuple = jnp.ones_like(UpperCAmelCase_ , dtype=scores.dtype ) * -float("inf" )
SCREAMING_SNAKE_CASE : Dict = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
SCREAMING_SNAKE_CASE : Optional[Any] = lax.dynamic_update_slice(UpperCAmelCase_ , UpperCAmelCase_ , (0, current_token) )
return new_scores
SCREAMING_SNAKE_CASE : Any = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCAmelCase_ ) , lambda: scores , ) , )
return scores
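# Hedged note on the nested lax.cond above: XLA-compiled generation cannot
# branch on traced values with Python if-statements, so "force a token once
# its position is reached" is expressed with lax.cond, and both branches are
# traced at compile time.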
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Union[str, Any] = generate_config.eos_token_id
SCREAMING_SNAKE_CASE : Tuple = generate_config.no_timestamps_token_id
SCREAMING_SNAKE_CASE : List[Any] = generate_config.no_timestamps_token_id + 1
SCREAMING_SNAKE_CASE : Dict = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(UpperCAmelCase_ , "max_initial_timestamp_index" ):
SCREAMING_SNAKE_CASE : List[Any] = generate_config.max_initial_timestamp_index
else:
SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
SCREAMING_SNAKE_CASE : List[str] = model_config.vocab_size
def __call__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ):
# suppress <|notimestamps|> which is handled by without_timestamps
SCREAMING_SNAKE_CASE : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCAmelCase_ , UpperCAmelCase_ , )
return jnp.where(
UpperCAmelCase_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(cur_len == self.begin_index , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = self.timestamp_begin + self.max_initial_timestamp_index
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(
UpperCAmelCase_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCAmelCase_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
SCREAMING_SNAKE_CASE : List[Any] = jax.nn.log_softmax(UpperCAmelCase_ , axis=-1 )
def handle_cumulative_probs(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = jax.vmap(UpperCAmelCase_ )(UpperCAmelCase_ , UpperCAmelCase_ )
return scores
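# Hedged standalone sketch (illustrative, not part of the file above): how a
# temperature warper and a top-k filter like the classes above compose on a
# dummy batch of scores. Function names and numbers are hypothetical.
import jax.numpy as jnp
def apply_temperature(scores, temperature=0.7):
    # Same rescaling as the temperature warper above.
    return scores / temperature
def apply_top_k(scores, k=2, filter_value=-jnp.inf):
    # Keep the k largest logits per row; mask the rest to filter_value.
    kth_largest = jnp.sort(scores, axis=-1)[:, -k][:, None]
    return jnp.where(scores < kth_largest, filter_value, scores)
scores = jnp.array([[1.0, 3.0, 2.0, 0.5]])
print(apply_top_k(apply_temperature(scores)))  # only the top-2 logits survive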
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
snake_case = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase__ , PIL.Image.Image ):
UpperCamelCase__ : Union[str, Any] = [image]
UpperCamelCase__ : List[str] = [trans(img.convert("RGB" ) ) for img in image]
UpperCamelCase__ : str = torch.stack(lowerCAmelCase__ )
return image
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCamelCase__ : List[Any] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=A__ , scheduler=A__ )
def _A ( self : str , UpperCAmelCase_ : Union[str, Any] ):
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def _A ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
# get the original timestep using init_timestep
UpperCamelCase__ : List[str] = min(int(num_inference_steps * strength ) , A__ )
UpperCamelCase__ : Optional[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase__ : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _A ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=None ):
if not isinstance(A__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(A__ )}''' )
UpperCamelCase__ : Optional[Any] = image.to(device=A__ , dtype=A__ )
if isinstance(A__ , A__ ) and len(A__ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(A__ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase__ : List[Any] = init_latents.shape
UpperCamelCase__ : List[str] = randn_tensor(A__ , generator=A__ , device=A__ , dtype=A__ )
# get latents
print("add noise to latents at timestep" , A__ )
UpperCamelCase__ : List[Any] = self.scheduler.add_noise(A__ , A__ , A__ )
UpperCamelCase__ : int = init_latents
return latents
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCAmelCase_ : int = None , UpperCAmelCase_ : str = 0.8 , UpperCAmelCase_ : List[Any] = 1 , UpperCAmelCase_ : List[Any] = None , UpperCAmelCase_ : Dict = 0.0 , UpperCAmelCase_ : List[Any] = 50 , UpperCAmelCase_ : Union[str, Any] = None , UpperCAmelCase_ : List[str] = "pil" , UpperCAmelCase_ : List[str] = True , ):
self.check_inputs(A__ )
# 2. Preprocess image
UpperCamelCase__ : str = preprocess(A__ )
# 3. set timesteps
self.scheduler.set_timesteps(A__ , device=self.device )
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.get_timesteps(A__ , A__ , self.device )
UpperCamelCase__ : Dict = timesteps[:1].repeat(A__ )
# 4. Prepare latent variables
UpperCamelCase__ : str = self.prepare_latents(A__ , A__ , A__ , self.unet.dtype , self.device , A__ )
UpperCamelCase__ : Optional[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(A__ ):
# 1. predict noise model_output
UpperCamelCase__ : Union[str, Any] = self.unet(A__ , A__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase__ : int = self.scheduler.step(
A__ , A__ , A__ , eta=A__ , use_clipped_model_output=A__ , generator=A__ , ).prev_sample
UpperCamelCase__ : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ : Dict = self.numpy_to_pil(A__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=A__ )
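# Hedged usage sketch for the pipeline above (the class name is obfuscated
# and the checkpoint is hypothetical; any DDIM-compatible unet/scheduler
# checkpoint should work):
#   pipe = ThisPipeline.from_pretrained("some/ddim-checkpoint")
#   image, latent_timestep = pipe(image=PIL.Image.open("input.png"),
#                                 strength=0.8, return_dict=False)
# With return_dict=True (the default) the call instead returns an
# ImagePipelineOutput whose .images holds the decoded images.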
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
snake_case = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=13 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Any=[10, 20, 30, 40] , UpperCAmelCase_ : Optional[int]=[2, 2, 3, 2] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : List[Any]=["stage2", "stage3", "stage4"] , UpperCAmelCase_ : Union[str, Any]=[2, 3, 4] , UpperCAmelCase_ : List[Any]=None , ):
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : List[Any] = num_channels
SCREAMING_SNAKE_CASE : Tuple = num_stages
SCREAMING_SNAKE_CASE : str = hidden_sizes
SCREAMING_SNAKE_CASE : Any = depths
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : str = num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = out_features
SCREAMING_SNAKE_CASE : str = out_indices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def _A ( self : str ):
SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : str = self.get_config()
return config, pixel_values, labels
def _A ( self : Tuple ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _A ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Optional[Any] = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _A ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : Union[str, Any] = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : str = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __a , __a , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Union[str, Any] = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[int] = True
UpperCamelCase_ : str = False
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : int = False
def _A ( self : int ):
SCREAMING_SNAKE_CASE : List[Any] = ConvNextModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def _A ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A ( self : str ):
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def _A ( self : Tuple ):
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def _A ( self : Dict ):
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def _A ( self : Union[str, Any] ):
pass
def _A ( self : Any ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class(a__ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def _A ( self : Optional[int] ):
def check_hidden_states_output(UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[str] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**self._prepare_for_class(a__ , a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : Dict = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(a__ , a__ , a__ )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def _A ( self : Union[str, Any] ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[str] = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _A ( self : Any ):
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[int] = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(a__ )
SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE : str = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**a__ )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase , __a ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = (ConvNextBackbone,) if is_torch_available() else ()
UpperCamelCase_ : List[str] = ConvNextConfig
UpperCamelCase_ : int = False
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Optional[Any] = ConvNextModelTester(self )
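# Hedged usage note: these tests are normally run through pytest, e.g.
#   RUN_SLOW=1 python -m pytest tests/models/convnext/test_modeling_convnext.py
# RUN_SLOW gates the @slow integration tests; the file path is an assumption.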
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
|
import qiskit
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE : Optional[int] = qiskit.QuantumCircuit(lowercase , lowercase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
SCREAMING_SNAKE_CASE : Dict = qiskit.execute(lowercase , lowercase , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase )
if __name__ == "__main__":
snake_case = single_qubit_measure(2, 2)
print(F"""Total count for various states are: {counts}""")
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
snake_case = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
snake_case = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = EfficientNetConfig()
SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"]
SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"]
SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"]
SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"]
SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"]
SCREAMING_SNAKE_CASE : str = "huggingface/label-files"
SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE : str = 1000
SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE : Tuple = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : int = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase , )
return preprocessor
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = len(lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )}
SCREAMING_SNAKE_CASE : Dict = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
SCREAMING_SNAKE_CASE : int = {}
for item in rename_keys:
if item[0] in original_param_names:
SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1]
SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight"
SCREAMING_SNAKE_CASE : List[str] = "classifier.bias"
return key_mapping
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
SCREAMING_SNAKE_CASE : str = key_mapping[key]
if "_conv" in key and "kernel" in key:
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) )
else:
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase )
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name](
include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , )
SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables
SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables
SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
SCREAMING_SNAKE_CASE : Tuple = param.numpy()
SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() )
# Load HuggingFace model
SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase )
SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval()
SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase )
replace_params(lowercase , lowercase , lowercase )
# Initialize preprocessor and preprocess input image
SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase )
SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy()
# Original model inference
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase )
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 )
SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase ):
os.mkdir(lowercase )
# Save converted model and image processor
hf_model.save_pretrained(lowercase )
preprocessor.save_pretrained(lowercase )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowercase )
hf_model.push_to_hub(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
snake_case = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
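# Hedged CLI sketch for the converter above (the script filename is an
# assumption; the flags match the argparse definitions):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model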
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Any = StableDiffusionLDMaDPipeline
UpperCamelCase_ : Dict = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _A ( self : Any ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE : List[str] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : int = CLIPTextModel(__a )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE : List[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _A ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=0 ):
if str(__a ).startswith("mps" ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(__a )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=__a ).manual_seed(__a )
SCREAMING_SNAKE_CASE : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = StableDiffusionLDMaDPipeline(**__a )
SCREAMING_SNAKE_CASE : Tuple = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(__a )
SCREAMING_SNAKE_CASE : str = ldmad_pipe(**__a )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = output.rgb, output.depth
SCREAMING_SNAKE_CASE : Union[str, Any] = rgb[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
SCREAMING_SNAKE_CASE : Optional[Any] = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
SCREAMING_SNAKE_CASE : Optional[int] = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionLDMaDPipeline(**__a )
SCREAMING_SNAKE_CASE : List[Any] = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(__a )
SCREAMING_SNAKE_CASE : str = 3 * [inputs["prompt"]]
# forward
SCREAMING_SNAKE_CASE : Optional[int] = ldmad_pipe(**__a )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = output.rgb, output.depth
SCREAMING_SNAKE_CASE : List[str] = rgb_slice_a[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : int = depth_slice_a[0, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(__a )
SCREAMING_SNAKE_CASE : List[str] = 3 * [inputs.pop("prompt" )]
SCREAMING_SNAKE_CASE : int = ldmad_pipe.tokenizer(
__a , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
SCREAMING_SNAKE_CASE : List[Any] = text_inputs["input_ids"].to(__a )
SCREAMING_SNAKE_CASE : Union[str, Any] = ldmad_pipe.text_encoder(__a )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = prompt_embeds
# forward
SCREAMING_SNAKE_CASE : Dict = ldmad_pipe(**__a )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = output.rgb, output.depth
SCREAMING_SNAKE_CASE : int = rgb_slice_a[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[str] = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[Any] = PNDMScheduler(skip_prk_steps=__a )
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionLDMaDPipeline(**__a )
SCREAMING_SNAKE_CASE : Union[str, Any] = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(__a )
SCREAMING_SNAKE_CASE : Optional[int] = "french fries"
SCREAMING_SNAKE_CASE : int = ldmad_pipe(**__a , negative_prompt=__a )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = output.rgb, output.depth
SCREAMING_SNAKE_CASE : Tuple = rgb[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Optional[Any] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
SCREAMING_SNAKE_CASE : List[str] = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Tuple ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict="cpu" , UpperCAmelCase_ : List[str]=torch.floataa , UpperCAmelCase_ : Dict=0 ):
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=__a ).manual_seed(__a )
SCREAMING_SNAKE_CASE : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__a ).to(device=__a , dtype=__a )
SCREAMING_SNAKE_CASE : List[Any] = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : int = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
SCREAMING_SNAKE_CASE : Any = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
SCREAMING_SNAKE_CASE : List[Any] = self.get_inputs(__a )
SCREAMING_SNAKE_CASE : Union[str, Any] = ldmad_pipe(**__a )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = output.rgb, output.depth
SCREAMING_SNAKE_CASE : Optional[Any] = rgb[0, -3:, -3:, -1].flatten()
SCREAMING_SNAKE_CASE : int = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
SCREAMING_SNAKE_CASE : str = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495_586
        expected_rgb_std = 0.33_795_515
        expected_depth_mean = 112.48_518
        expected_depth_std = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
    def test_ldm3d_v2(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4_194_127
        expected_rgb_std = 0.35_375_586
        expected_depth_mean = 0.5_638_502
        expected_depth_std = 0.34_686_103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def generate_large_matrix():
    """simple docstring"""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid):
    """simple docstring"""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array):
    """simple docstring"""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid):
    """simple docstring"""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid):
    """simple docstring"""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid):
    """simple docstring"""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
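# A minimal cross-check sketch (illustrative, not part of the original file):
# on each of the small test grids the three counting strategies must agree,
# and every grid must pass the sortedness check first.
def _sanity_check_counters():
    for small_grid in test_grids[:-1]:  # skip the large generated grid
        validate_grid(small_grid)
        expected = count_negatives_brute_force(small_grid)
        assert count_negatives_binary_search(small_grid) == expected
        assert count_negatives_brute_force_with_break(small_grid) == expected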
def benchmark():
    """simple docstring"""
    from timeit import timeit
    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
def solution(n=1000):
    """simple docstring"""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
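# Why the closed form works (a reasoning sketch, added for clarity):
# substituting c = n - a - b into a**2 + b**2 = c**2 and expanding gives
# a**2 + b**2 = n**2 - 2*n*(a + b) + (a + b)**2, which rearranges to
# b = (n**2 - 2*a*n) / (2*n - 2*a). A tiny illustrative check for n = 1000:
def _check_derivation(a=200, n=1000):
    b = (n * n - 2 * a * n) // (2 * n - 2 * a)
    c = n - a - b
    assert (b, c) == (375, 425) and a * a + b * b == c * c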
if __name__ == "__main__":
print(F"""{solution() = }""")
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
def is_isogram(string):
    """simple docstring"""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
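# A couple of illustrative cases (a sketch, not part of the original script):
# comparison is case-insensitive, so a letter repeated in any case fails.
def _isogram_examples():
    assert is_isogram("Uncopyrightable")
    assert not is_isogram("allowance")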
if __name__ == "__main__":
snake_case = input("""Enter a string """).strip()
snake_case = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
snake_case = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """simple docstring"""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
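# Quick illustration (a sketch, not part of the original module): because both
# arguments are L2-normalized first, matching an embedding matrix against
# itself yields values of roughly 1.0 on the diagonal.
def _cosine_distance_example():
    embeds = torch.randn(2, 8)
    return cosine_distance(embeds, embeds)  # 2x2 matrix, diagonal ~1.0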
class StableDiffusionSafetyChecker(PreTrainedModel):
    '''simple docstring'''
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
@torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
def or_gate(input_1, input_2):
    """simple docstring"""
    return int((input_1, input_2).count(1) != 0)
def test_or_gate():
    """simple docstring"""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
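# A generalization sketch (hypothetical helper, not in the original file):
# an n-input OR is simply "at least one input is 1", which the built-in
# any() expresses directly.
def or_gate_n(*inputs):
    return int(any(inputs))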
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
"""simple docstring"""
def dodecahedron_surface_area(edge):
    """simple docstring"""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge):
    """simple docstring"""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
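# Worked example (illustrative): for edge = 2 the surface area is
# 3 * sqrt(25 + 10*sqrt(5)) * 2**2, roughly 82.58, and the volume is
# ((15 + 7*sqrt(5)) / 4) * 2**3, roughly 61.30.
def _dodecahedron_example():
    return dodecahedron_surface_area(2), dodecahedron_volume(2)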
if __name__ == "__main__":
import doctest
doctest.testmod()
class DisjointSet:
    '''simple docstring'''
    def __init__(self, set_counts: list):
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))
    def merge(self, src: int, dst: int):
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True
    def get_parent(self, disj_set: int):
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
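# A minimal usage sketch (illustrative, assumes the class above): three
# singleton sets with element counts 1, 2 and 3; merging elements 0 and 2
# joins their sets, so the largest set now holds 1 + 3 = 4 elements.
def _disjoint_set_example():
    ds = DisjointSet([1, 2, 3])
    ds.merge(0, 2)
    assert ds.get_parent(0) == ds.get_parent(2)
    return ds.max_set  # 4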
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class IBertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "ibert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "timm_backbone"
    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
snake_case = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(datasets, probabilities=None, seed=None, info=None, split=None, stopping_strategy="first_exhausted"):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def concatenate_datasets(dsets, info=None, split=None, axis=0):
    """simple docstring"""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
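# A minimal usage sketch (illustrative; the toy data and helper below are
# hypothetical and assume the installed `datasets` package is importable):
def _interleave_example():
    from datasets import Dataset  # avoid the relative imports used above
    d1 = Dataset.from_dict({"x": [0, 1, 2]})
    d2 = Dataset.from_dict({"x": [10, 11, 12]})
    return interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)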
from math import sqrt
def sum_of_divisors(n):
    """simple docstring"""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(limit=10000):
    """simple docstring"""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
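# Worked example (illustrative): 220 and 284 form an amicable pair, since
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220. Both numbers
# therefore satisfy sum_of_divisors(sum_of_divisors(n)) == n while having
# sum_of_divisors(n) != n, which is exactly the condition summed above.
def _amicable_example():
    return sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220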
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
def count_inversions_bf(arr):
    """simple docstring"""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """simple docstring"""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """simple docstring"""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    """simple docstring"""
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
'''simple docstring'''
def __init__( self : List[str] ):
SCREAMING_SNAKE_CASE : List[Any] = False
def _A ( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ):
if not self.initialized:
SCREAMING_SNAKE_CASE : List[Any] = RagRetriever(
UpperCamelCase__ , question_encoder_tokenizer=UpperCamelCase__ , generator_tokenizer=UpperCamelCase__ , index=UpperCamelCase__ , init_retrieval=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Any = True
def _A ( self : str ):
self.retriever.index.init_index()
def _A ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Tuple = self.retriever._main_retrieve(UpperCamelCase__ , UpperCamelCase__ )
return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict=None ):
if index is not None and index.is_initialized() and len(UpperCamelCase__ ) > 0:
raise ValueError(
"When using Ray for distributed fine-tuning, "
"you'll need to provide the paths instead, "
"as the dataset and the index are loaded "
"separately. More info in examples/rag/use_own_knowledge_dataset.py " )
super().__init__(
UpperCamelCase__ , question_encoder_tokenizer=UpperCamelCase__ , generator_tokenizer=UpperCamelCase__ , index=UpperCamelCase__ , init_retrieval=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Dict = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for worker in self.retrieval_workers
] )
def _A ( self : Tuple ):
logger.info("initializing retrieval" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _A ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ):
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
SCREAMING_SNAKE_CASE : Any = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
SCREAMING_SNAKE_CASE : Optional[Any] = ray.get(random_worker.retrieve.remote(UpperCamelCase__ , UpperCamelCase__ ) )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self._main_retrieve(UpperCamelCase__ , UpperCamelCase__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase__ )
@classmethod
def _A ( cls : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : Union[str, Any] ):
return super(UpperCamelCase__ , cls ).get_tokenizers(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
@classmethod
def _A ( cls : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("config" , UpperCamelCase__ ) or RagConfig.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = RagTokenizer.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = rag_tokenizer.question_encoder
SCREAMING_SNAKE_CASE : List[Any] = rag_tokenizer.generator
if indexed_dataset is not None:
SCREAMING_SNAKE_CASE : List[Any] = "custom"
SCREAMING_SNAKE_CASE : Any = CustomHFIndex(config.retrieval_vector_size , UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Tuple = cls._build_index(UpperCamelCase__ )
return cls(
UpperCamelCase__ , question_encoder_tokenizer=UpperCamelCase__ , generator_tokenizer=UpperCamelCase__ , retrieval_workers=UpperCamelCase__ , index=UpperCamelCase__ , )
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
snake_case = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="<mask_2>" , UpperCAmelCase_ : Optional[int]="<mask_1>" , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=103 , **UpperCAmelCase_ : Optional[int] , ):
SCREAMING_SNAKE_CASE : Optional[Any] = offset
if additional_special_tokens is not None:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise TypeError(
f'''additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is'''
f''' {type(UpperCAmelCase_ )}''' )
SCREAMING_SNAKE_CASE : Optional[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(UpperCAmelCase_ ) , self.offset - 1 )
]
if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE : Tuple = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True
def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def _A ( self : int , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(UpperCAmelCase_ )
elif token_ids_a is None:
return self._special_token_mask(UpperCAmelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _A ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _A ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE : List[str] = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    """simple docstring"""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """simple docstring"""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = get_training_setup(__lowerCAmelCase )
# Use a single batch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = next(iter(__lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
SCREAMING_SNAKE_CASE : Union[str, Any] = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
def test_distributed_sync(accelerator):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = get_training_setup(__lowerCAmelCase )
# Use a single batch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = next(iter(__lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
SCREAMING_SNAKE_CASE : List[Any] = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = get_training_setup(__lowerCAmelCase )
for iteration, batch in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCAmelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
SCREAMING_SNAKE_CASE : Tuple = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = get_training_setup(__lowerCAmelCase , __lowerCAmelCase )
for iteration, batch in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCAmelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
SCREAMING_SNAKE_CASE : Dict = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCAmelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def test_dataloader_break():
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Accelerator()
SCREAMING_SNAKE_CASE : Dict = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(__lowerCAmelCase , batch_size=16 )
SCREAMING_SNAKE_CASE : List[Any] = RegressionDataset(length=96 )
SCREAMING_SNAKE_CASE : Any = DataLoader(__lowerCAmelCase , batch_size=16 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase )
if iteration < len(__lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase )
if batch_num < len(__lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main():
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator()
SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(__lowerCAmelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(__lowerCAmelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(__lowerCAmelCase , __lowerCAmelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(__lowerCAmelCase , __lowerCAmelCase )
def _mp_fn(index):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from copy import deepcopy
class FenwickTree:
    '''simple docstring'''
    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")
    def init(self, arr: list[int]):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))
    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))
    def add(self, index: int, value: int):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)
    def update(self, index: int, value: int):
        self.add(index, value - self.get(index))
    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result
    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)
    def get(self, index: int) -> int:
        return self.query(index, index + 1)
    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
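# A minimal usage sketch (illustrative, assumes the class above): prefix sums
# over half-open index ranges, plus a point update via add().
def _fenwick_example():
    tree = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert tree.prefix(3) == 1 + 2 + 3  # sum over indices [0, 3)
    tree.add(1, 10)  # the underlying array becomes [1, 12, 3, 4, 5]
    assert tree.query(0, 2) == 13  # 1 + 12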
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"])
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
snake_case = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1":
SCREAMING_SNAKE_CASE : int = 2
# New Code #
SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : Any = config["lr"]
SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] )
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" )
set_seed(lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
            # Gradient accumulation on TPU is currently unsupported and not advised, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase ):
SCREAMING_SNAKE_CASE : Any = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = output.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowercase , references=lowercase , )
SCREAMING_SNAKE_CASE : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
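# ---------------------------------------------------------------------------
# For reference, a plain-PyTorch sketch of what gradient accumulation does
# conceptually: scale each loss by 1/accumulation_steps and only step the
# optimizer every accumulation_steps batches, so the accumulated gradient
# matches one large-batch gradient. This is an illustration of the idea, not
# Accelerate's actual implementation (`accelerator.accumulate` additionally
# manages gradient synchronization across processes).
# ---------------------------------------------------------------------------
def manual_accumulation_sketch():
    import torch

    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    accumulation_steps = 4
    batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(8)]
    for step, (inputs, targets) in enumerate(batches):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        # Dividing keeps the effective gradient scale independent of
        # accumulation_steps.
        (loss / accumulation_steps).backward()
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()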
| 319
| 0
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
snake_case = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
snake_case = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = EfficientNetConfig()
SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"]
SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"]
SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"]
SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"]
SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"]
SCREAMING_SNAKE_CASE : str = "huggingface/label-files"
SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE : str = 1000
SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
    SCREAMING_SNAKE_CASE : Tuple = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : int = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase , )
return preprocessor
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = len(lowercase )
    SCREAMING_SNAKE_CASE : Optional[int] = {b: str(i ) for b, i in zip(lowercase , range(lowercase ) )}
SCREAMING_SNAKE_CASE : Dict = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
SCREAMING_SNAKE_CASE : int = {}
for item in rename_keys:
if item[0] in original_param_names:
SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1]
SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight"
SCREAMING_SNAKE_CASE : List[str] = "classifier.bias"
return key_mapping
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
SCREAMING_SNAKE_CASE : str = key_mapping[key]
if "_conv" in key and "kernel" in key:
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) )
else:
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase )
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name](
include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , )
SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables
SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables
SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
SCREAMING_SNAKE_CASE : Tuple = param.numpy()
SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() )
# Load HuggingFace model
SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase )
SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval()
SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase )
replace_params(lowercase , lowercase , lowercase )
# Initialize preprocessor and preprocess input image
SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase )
SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy()
# Original model inference
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase )
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 )
SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase ):
os.mkdir(lowercase )
# Save converted model and image processor
hf_model.save_pretrained(lowercase )
preprocessor.save_pretrained(lowercase )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowercase )
hf_model.push_to_hub(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
snake_case = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
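# ---------------------------------------------------------------------------
# A small, self-contained illustration of the two weight permutations used in
# replace_params above: TF stores conv kernels as HWIO (height, width, in,
# out) while PyTorch expects OIHW, and TF depthwise kernels as
# (H, W, in, multiplier) while PyTorch expects (in * multiplier, 1, H, W) for
# a multiplier of 1. The shapes below are made-up examples, not EfficientNet's.
# ---------------------------------------------------------------------------
def kernel_permutation_sketch():
    import numpy as np
    import torch

    tf_kernel = np.zeros((3, 3, 16, 32))               # HWIO
    pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
    assert tuple(pt_kernel.shape) == (32, 16, 3, 3)    # OIHW

    tf_depthwise = np.zeros((3, 3, 16, 1))             # (H, W, in, multiplier)
    pt_depthwise = torch.from_numpy(tf_depthwise).permute(2, 3, 0, 1)
    assert tuple(pt_depthwise.shape) == (16, 1, 3, 3)  # (in, 1, H, W)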
| 351
|
import functools
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
if not isinstance(lowercase , lowercase ) or not all(isinstance(lowercase , lowercase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(lowercase ) != 3 or not all(isinstance(lowercase , lowercase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(lowercase ) == 0:
return 0
if min(lowercase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(lowercase ) >= 366:
raise ValueError("All days elements should be less than 366" )
SCREAMING_SNAKE_CASE : Dict = set(lowercase )
@functools.cache
def dynamic_programming(lowercase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
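# ---------------------------------------------------------------------------
# A readable usage sketch of the same ticket DP with descriptive names
# (minimum_cost_tickets is a hypothetical rewrite for illustration): for each
# travel day, buy a 1-day, 7-day, or 30-day pass and skip ahead accordingly;
# non-travel days cost nothing.
# ---------------------------------------------------------------------------
import functools


def minimum_cost_tickets(days: list[int], costs: list[int]) -> int:
    travel_days = set(days)

    @functools.cache
    def best(day: int) -> int:
        if day > 365:
            return 0
        if day not in travel_days:
            return best(day + 1)
        return min(
            costs[0] + best(day + 1),   # 1-day pass
            costs[1] + best(day + 7),   # 7-day pass
            costs[2] + best(day + 30),  # 30-day pass
        )

    return best(1)


# Classic example: buy a 7-day pass on day 1 and a 1-day pass on day 20 -> 11.
assert minimum_cost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11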
| 319
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
def lowerCamelCase__ ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ["torch"] )
def lowerCamelCase__ ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ["torch"] )
def lowerCamelCase__ ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ["torch"] )
def lowerCamelCase__ ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ["torch"] )
def lowerCamelCase__ ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ["torch"] )
def lowerCamelCase__ ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ["torch"] )
def lowerCamelCase__ ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''torch''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : List[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : str = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''torch''']
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : str = ['''torch''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : int = ['''torch''']
def __init__( self : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : int = ['''torch''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''torch''']
def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : str = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
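# ---------------------------------------------------------------------------
# All of the classes above follow a single pattern: placeholder objects that
# raise a helpful ImportError when an optional backend (here, torch) is not
# installed, instead of failing later with a confusing AttributeError. A
# self-contained sketch of the idea that assumes nothing about the real
# `transformers` internals (MissingBackendMeta / fake_requires_backends are
# illustrative names, not the actual utilities):
# ---------------------------------------------------------------------------
import importlib.util


def fake_requires_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = getattr(obj, "__name__", type(obj).__name__)
        raise ImportError(f"{name} requires the missing backend(s): {missing}")


class MissingBackendMeta(type):
    # Hooking the metaclass means even classmethods like `from_pretrained`
    # trigger the backend check before anything else runs.
    def __getattr__(cls, key):
        fake_requires_backends(cls, cls._backends)
        raise AttributeError(key)


class FakeTorchModel(metaclass=MissingBackendMeta):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        fake_requires_backends(self, self._backends)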
| 352
|
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
    # Round the float cube root before cubing: 27 ** (1 / 3) evaluates to
    # 3.0000000000000004, so comparing the raw float cube against n would fail
    # even for perfect cubes. (Negative inputs are not supported.)
    SCREAMING_SNAKE_CASE : Dict = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
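# ---------------------------------------------------------------------------
# Float cube roots still lose precision for very large integers, so the
# rounded-float test above can misfire on huge inputs. An exact, integer-only
# alternative using binary search (perfect_cube_exact is a hypothetical name):
# ---------------------------------------------------------------------------
def perfect_cube_exact(n: int) -> bool:
    if n < 0:
        n = -n  # (-k) ** 3 == -(k ** 3), so the magnitude decides
    low, high = 0, n
    while low <= high:
        mid = (low + high) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            low = mid + 1
        else:
            high = mid - 1
    return False


assert perfect_cube_exact(27) and not perfect_cube_exact(4)
assert perfect_cube_exact((10**20 + 3) ** 3)  # exact even for huge integers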
| 319
| 0
|
from collections.abc import Iterable
from typing import Generic, TypeVar
snake_case = TypeVar("""_T""")
class SCREAMING_SNAKE_CASE ( Generic[_T] ):
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : Iterable[_T] | None = None ):
SCREAMING_SNAKE_CASE : list[_T] = list(iterable or [] )
SCREAMING_SNAKE_CASE : list[_T] = []
def __len__( self : Dict ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self : str ):
return f'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def _A ( self : List[Any] , UpperCAmelCase_ : _T ):
self._stacka.append(UpperCAmelCase_ )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : str = self._stacka.pop
SCREAMING_SNAKE_CASE : Tuple = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("Queue is empty" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
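# ---------------------------------------------------------------------------
# Because the name mangling above collapses the two distinct stacks into one
# identifier, here is a readable sketch of the same two-stack queue technique
# (TwoStackQueue is an illustrative name): pushes go to an inbox stack; a pop
# drains the inbox into an outbox, reversing the order, so each element moves
# at most twice and every operation is amortized O(1).
# ---------------------------------------------------------------------------
class TwoStackQueue:
    def __init__(self, iterable=None):
        self._inbox = list(iterable or [])
        self._outbox = []

    def __len__(self):
        return len(self._inbox) + len(self._outbox)

    def put(self, item):
        self._inbox.append(item)

    def get(self):
        if not self._outbox:
            # Reverse the inbox exactly once per element.
            while self._inbox:
                self._outbox.append(self._inbox.pop())
        if not self._outbox:
            raise IndexError("Queue is empty")
        return self._outbox.pop()


q = TwoStackQueue([1, 2, 3])
q.put(4)
assert [q.get() for _ in range(4)] == [1, 2, 3, 4]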
| 353
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
snake_case = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
snake_case = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" )
return sd
def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = OrderedDict()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE : Optional[Any] = key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE : Tuple = new_key.replace(name_pair[0] , name_pair[1] )
SCREAMING_SNAKE_CASE : Union[str, Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
        # Old bert code didn't have `decoder.bias`; it was added separately
SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = "pretraining"
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[int] = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Optional[Any] = {"visual_embedding_dim": 512}
SCREAMING_SNAKE_CASE : Union[str, Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 2048}
SCREAMING_SNAKE_CASE : Any = "vqa_advanced"
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048, "num_labels": 3129}
SCREAMING_SNAKE_CASE : Tuple = "vqa"
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = "nlvr"
SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase )
# Load State Dict
SCREAMING_SNAKE_CASE : Union[str, Any] = load_state_dict(lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_new_dict(lowercase , lowercase )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(lowercase )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForQuestionAnswering(lowercase )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForVisualReasoning(lowercase )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE : List[Any] = VisualBertForMultipleChoice(lowercase )
model.load_state_dict(lowercase )
# Save Checkpoints
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
snake_case = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
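# ---------------------------------------------------------------------------
# A dependency-free sketch of the key-renaming pass in get_new_dict above:
# every (old, new) substring pair is applied to each checkpoint key in order,
# and detector weights are dropped. The state dict and pairs below are
# made-up examples, not real VisualBERT keys.
# ---------------------------------------------------------------------------
from collections import OrderedDict

rename_pairs = [("bert.bert", "visual_bert"), ("projection", "visual_projection")]
state_dict = OrderedDict(
    [("bert.bert.layer.0.weight", 1), ("projection.weight", 2), ("detector.head", 3)]
)

renamed = OrderedDict()
for key, value in state_dict.items():
    if "detector" in key:  # detector weights are skipped, as in the converter
        continue
    new_key = key
    for old, new in rename_pairs:
        new_key = new_key.replace(old, new)
    renamed[new_key] = value

assert list(renamed) == ["visual_bert.layer.0.weight", "visual_projection.weight"]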
| 319
| 0
|
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _A ( cls : Optional[Any] ):
SCREAMING_SNAKE_CASE : Tuple = TOKEN
HfFolder.save_token(UpperCAmelCase_ )
@classmethod
def _A ( cls : Optional[int] ):
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ , repo_id="test-config" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase_ , repo_id="valid_org/test-config-org" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _A ( self : List[Any] ):
CustomConfig.register_for_auto_class()
SCREAMING_SNAKE_CASE : Union[str, Any] = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=UpperCAmelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
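# ---------------------------------------------------------------------------
# The connection-error test further below patches `requests.Session.request`
# to simulate a server outage without any network traffic. The minimal
# pattern, shown standalone (the URL is a placeholder):
# ---------------------------------------------------------------------------
def _mock_server_down_sketch():
    import unittest.mock as mock

    import requests

    fake_response = mock.Mock(status_code=500, headers={})
    with mock.patch("requests.Session.request", return_value=fake_response) as mock_request:
        response = requests.Session().request("HEAD", "https://example.com/model")
        assert response.status_code == 500  # the patched method never hits the network
    mock_request.assert_called()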
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Any = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
SCREAMING_SNAKE_CASE : Optional[int] = c.n_embd + 1 # int
SCREAMING_SNAKE_CASE : Any = c.resid_pdrop + 1.0 # float
SCREAMING_SNAKE_CASE : Optional[int] = not c.scale_attn_weights # bool
SCREAMING_SNAKE_CASE : Union[str, Any] = c.summary_type + "foo" # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(UpperCAmelCase_ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(UpperCAmelCase_ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(UpperCAmelCase_ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(UpperCAmelCase_ , c.summary_type , "mismatch for key: summary_type" )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : List[str] = PretrainedConfig()
SCREAMING_SNAKE_CASE : Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
UpperCAmelCase_ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
SCREAMING_SNAKE_CASE : str = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase_ , UpperCAmelCase_ )]
if len(UpperCAmelCase_ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f''' {', '.join(UpperCAmelCase_ )}.''' )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check ensures we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    """Constructs a CLAP processor wrapping a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Merge the audio features into the text encoding when both modalities are given.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
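# Usage sketch (the checkpoint name is illustrative; any CLAP checkpoint and a
# mono waveform array at the expected sampling rate would work the same way):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48_000, return_tensors="pt")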
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
snake_case = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
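# Note: `_LazyModule` replaces this module in `sys.modules`, so the torch-backed
# submodules listed in `_import_structure` are only really imported the first time
# one of their attributes (e.g. `SwiftFormerModel`) is accessed.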
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_parquet_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages of at most 100 words."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )

    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
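    # For example (illustrative content only), each line of that file is one
    # "title<TAB>text" record, e.g.:
    #   Aaron\tAaron is a prophet, high priest, and the elder brother of Moses.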
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
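# e.g. after the prefix replacements above are applied in order, a key like
# "bert.bert.encoder.layer.0.attention..." becomes
# "visual_bert.encoder.layer.0.attention..." in the converted state dict.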
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    """Load the checkpoint's state dict onto the CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into the VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")

    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: bubble the largest remaining element towards the end,
    then recurse on the shorter prefix until a pass makes no swaps.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([], 0)
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
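# Worked sketch: for n aligned English/Indic sentence pairs, `sim` is an (n, n)
# cosine-distance matrix between the mean-centered sentence vectors; row i counts
# as a hit when its gold index i appears among that row's 10 nearest candidates,
# and precision@10 is the fraction of rows that are hits.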
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """A list of logits processors/warpers, applied in order to the scores at each generation step."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Logits warper for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
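# Worked sketch: with logits [2.0, 1.0], a temperature of 0.5 yields [4.0, 2.0]
# (a sharper softmax), while 2.0 yields [1.0, 0.5] (a flatter one); 1.0 is a no-op.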
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps the smallest set of tokens whose cumulative probability reaches `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
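# Worked sketch of the mask above: with sorted probs [0.5, 0.3, 0.15, 0.05] and
# top_p=0.9, cumsum = [0.5, 0.8, 0.95, 1.0], so `cumsum < 0.9` keeps the first two
# tokens; rolling the mask right by one re-admits the third token, the one that
# pushed the cumulative mass past the threshold.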
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
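# Implementation note: flattening to shape (batch_size * vocab_size,) lets a single
# scatter write every row's top-k scores at once; `shift` offsets row i's token
# indices by i * vocab_size so they land in the right slice of the flat array.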
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token as the last generated token when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Enforces a minimum length by setting the EOS probability to 0 while `cur_len < min_length`."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
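# e.g. with min_length=5: while cur_len < 5 the clip term is 0, so the penalty flag
# is 1 and the EOS column is pinned to -inf, preventing early termination.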
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens as soon as generation starts (at `begin_index`)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens at every generation step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Takes a map of generation indices to token ids and forces those tokens at those positions."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is beyond the length of force_token_array, do nothing.
            lambda: scores,
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (non-negative) tokens are forced.
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Whisper-specific processor that constrains where timestamp tokens may be sampled."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
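            # Rough sketch of what `accumulate` does (behavioral note, not the library
            # source): with gradient_accumulation_steps=N, `accelerator.backward` runs
            # on every batch, but the optimizer step and gradient synchronization are
            # only performed once every N batches, emulating an N-times larger batch.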
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow logging if it gets imported indirectly
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of
    objects and their classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str]=0.9 ):
SCREAMING_SNAKE_CASE : Dict = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
SCREAMING_SNAKE_CASE : Any = target_size[0].tolist()
def unnormalize(UpperCAmelCase_ : List[Any] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
SCREAMING_SNAKE_CASE : Dict = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
SCREAMING_SNAKE_CASE : Dict = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
SCREAMING_SNAKE_CASE : List[Any] = [unnormalize(UpperCAmelCase_ ) for bbox in model_outputs["bbox"].squeeze(0 )]
SCREAMING_SNAKE_CASE : List[Any] = ["score", "label", "box"]
SCREAMING_SNAKE_CASE : Union[str, Any] = [dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) for vals in zip(scores.tolist() , UpperCAmelCase_ , UpperCAmelCase_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
SCREAMING_SNAKE_CASE : List[Any] = self.image_processor.post_process_object_detection(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = raw_annotations[0]
SCREAMING_SNAKE_CASE : List[str] = raw_annotation["scores"]
SCREAMING_SNAKE_CASE : Dict = raw_annotation["labels"]
SCREAMING_SNAKE_CASE : Dict = raw_annotation["boxes"]
SCREAMING_SNAKE_CASE : List[Any] = scores.tolist()
SCREAMING_SNAKE_CASE : str = [self.model.config.idalabel[label.item()] for label in labels]
SCREAMING_SNAKE_CASE : Dict = [self._get_bounding_box(UpperCAmelCase_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
SCREAMING_SNAKE_CASE : Any = ["score", "label", "box"]
SCREAMING_SNAKE_CASE : str = [
dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def _A ( self : Optional[Any] , UpperCAmelCase_ : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
SCREAMING_SNAKE_CASE : int = box.int().tolist()
SCREAMING_SNAKE_CASE : List[Any] = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
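
# A minimal usage sketch of the pipeline defined above, via the public
# `pipeline()` factory. The checkpoint name, image URL, and threshold are
# illustrative assumptions, not taken from this file.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")  # hypothetical checkpoint choice
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
for pred in predictions:
    # Each prediction is a dict: {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
    print(pred["label"], round(pred["score"], 3), pred["box"])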
| 360
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 319
| 0
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = SwinConfig()
SCREAMING_SNAKE_CASE : Union[str, Any] = swin_name.split("_" )
SCREAMING_SNAKE_CASE : Tuple = name_split[1]
SCREAMING_SNAKE_CASE : Dict = int(name_split[4] )
SCREAMING_SNAKE_CASE : Optional[int] = int(name_split[3][-1] )
if model_size == "tiny":
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : Optional[Any] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif model_size == "small":
SCREAMING_SNAKE_CASE : List[Any] = 96
SCREAMING_SNAKE_CASE : Optional[int] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : str = (3, 6, 12, 24)
elif model_size == "base":
SCREAMING_SNAKE_CASE : Optional[int] = 128
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
else:
SCREAMING_SNAKE_CASE : str = 192
SCREAMING_SNAKE_CASE : Optional[int] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
if "in22k" in swin_name:
SCREAMING_SNAKE_CASE : Any = 21841
else:
SCREAMING_SNAKE_CASE : List[str] = 1000
SCREAMING_SNAKE_CASE : List[Any] = "huggingface/label-files"
SCREAMING_SNAKE_CASE : Tuple = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE : List[str] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE : int = {int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = idalabel
SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = img_size
SCREAMING_SNAKE_CASE : List[str] = num_classes
SCREAMING_SNAKE_CASE : int = embed_dim
SCREAMING_SNAKE_CASE : str = depths
SCREAMING_SNAKE_CASE : Union[str, Any] = num_heads
SCREAMING_SNAKE_CASE : str = window_size
return config
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
SCREAMING_SNAKE_CASE : Tuple = "encoder." + name
if "attn.proj" in name:
SCREAMING_SNAKE_CASE : Any = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace("attn" , "attention.self" )
if "norm1" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace("mlp.fc2" , "output.dense" )
if name == "norm.weight":
SCREAMING_SNAKE_CASE : Union[str, Any] = "layernorm.weight"
if name == "norm.bias":
SCREAMING_SNAKE_CASE : Any = "layernorm.bias"
if "head" in name:
SCREAMING_SNAKE_CASE : Any = name.replace("head" , "classifier" )
else:
SCREAMING_SNAKE_CASE : Tuple = "swin." + name
return name
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Any = orig_state_dict.pop(lowercase )
if "mask" in key:
continue
elif "qkv" in key:
SCREAMING_SNAKE_CASE : Optional[int] = key.split("." )
SCREAMING_SNAKE_CASE : Optional[Any] = int(key_split[1] )
SCREAMING_SNAKE_CASE : Dict = int(key_split[3] )
SCREAMING_SNAKE_CASE : Optional[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE : Dict = val[:dim, :]
SCREAMING_SNAKE_CASE : List[Any] = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = val[
:dim
]
SCREAMING_SNAKE_CASE : int = val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : str = val[
-dim:
]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = val
return orig_state_dict
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model(lowercase , pretrained=lowercase )
timm_model.eval()
SCREAMING_SNAKE_CASE : int = get_swin_config(lowercase )
SCREAMING_SNAKE_CASE : int = SwinForImageClassification(lowercase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = convert_state_dict(timm_model.state_dict() , lowercase )
model.load_state_dict(lowercase )
SCREAMING_SNAKE_CASE : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) )
SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowercase , return_tensors="pt" )
SCREAMING_SNAKE_CASE : Optional[Any] = timm_model(inputs["pixel_values"] )
SCREAMING_SNAKE_CASE : List[str] = model(**lowercase ).logits
assert torch.allclose(lowercase , lowercase , atol=1E-3 )
print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
snake_case = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 361
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
snake_case = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
snake_case = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = EfficientNetConfig()
SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["hidden_dim"]
SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAP[model_name]["width_coef"]
SCREAMING_SNAKE_CASE : Optional[int] = CONFIG_MAP[model_name]["depth_coef"]
SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : Any = CONFIG_MAP[model_name]["dropout_rate"]
SCREAMING_SNAKE_CASE : str = CONFIG_MAP[model_name]["dw_padding"]
SCREAMING_SNAKE_CASE : str = "huggingface/label-files"
SCREAMING_SNAKE_CASE : str = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE : str = 1000
SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE : Tuple = {int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : int = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase , )
return preprocessor
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
SCREAMING_SNAKE_CASE : List[str] = sorted(set(lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = len(lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )}
SCREAMING_SNAKE_CASE : Dict = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
SCREAMING_SNAKE_CASE : int = {}
for item in rename_keys:
if item[0] in original_param_names:
SCREAMING_SNAKE_CASE : Any = "efficientnet." + item[1]
SCREAMING_SNAKE_CASE : Optional[Any] = "classifier.weight"
SCREAMING_SNAKE_CASE : List[str] = "classifier.bias"
return key_mapping
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
SCREAMING_SNAKE_CASE : str = key_mapping[key]
if "_conv" in key and "kernel" in key:
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.transpose(lowercase ) )
else:
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase )
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = model_classes[model_name](
include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , )
SCREAMING_SNAKE_CASE : List[Any] = original_model.trainable_variables
SCREAMING_SNAKE_CASE : Dict = original_model.non_trainable_variables
SCREAMING_SNAKE_CASE : Dict = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
SCREAMING_SNAKE_CASE : Tuple = param.numpy()
SCREAMING_SNAKE_CASE : Tuple = list(tf_params.keys() )
# Load HuggingFace model
SCREAMING_SNAKE_CASE : Tuple = get_efficientnet_config(lowercase )
SCREAMING_SNAKE_CASE : str = EfficientNetForImageClassification(lowercase ).eval()
SCREAMING_SNAKE_CASE : Dict = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
SCREAMING_SNAKE_CASE : Dict = rename_keys(lowercase )
replace_params(lowercase , lowercase , lowercase )
# Initialize preprocessor and preprocess input image
SCREAMING_SNAKE_CASE : Optional[int] = convert_image_processor(lowercase )
SCREAMING_SNAKE_CASE : int = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = hf_model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits.detach().numpy()
# Original model inference
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
SCREAMING_SNAKE_CASE : Tuple = image.img_to_array(lowercase )
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(lowercase , axis=0 )
SCREAMING_SNAKE_CASE : Any = original_model.predict(lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase ):
os.mkdir(lowercase )
# Save converted model and image processor
hf_model.save_pretrained(lowercase )
preprocessor.save_pretrained(lowercase )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowercase )
hf_model.push_to_hub(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
snake_case = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 319
| 0
|
# Lint as: python3
import itertools
import os
import re
snake_case = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
snake_case = re.compile(r"""([a-z\d])([A-Z])""")
snake_case = re.compile(r"""(?<!_)_(?!_)""")
snake_case = re.compile(r"""(_{2,})""")
snake_case = r"""^\w+(\.\w+)*$"""
snake_case = r"""<>:/\|?*"""
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = _uppercase_uppercase_re.sub(R"\1_\2" , lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = _lowercase_uppercase_re.sub(R"\1_\2" , lowercase )
return name.lower()
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = _single_underscore_re.split(lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = [_multiple_underscores_re.split(lowercase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowercase ) if n != "" )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
if os.path.basename(lowercase ) != name:
raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(lowercase )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
if os.path.basename(lowercase ) != name:
raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , lowercase ):
        raise ValueError(F'''Split name should match \'{_split_re}\' but got \'{split}\'.''' )
return F'''{filename_prefix_for_name(lowercase )}-{split}'''
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = filename_prefix_for_split(lowercase , lowercase )
if filetype_suffix:
prefix += F'''.{filetype_suffix}'''
SCREAMING_SNAKE_CASE : Tuple = os.path.join(lowercase , lowercase )
return F'''{filepath}*'''
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = filename_prefix_for_split(lowercase , lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(lowercase , lowercase )
if shard_lengths:
SCREAMING_SNAKE_CASE : Optional[int] = len(lowercase )
SCREAMING_SNAKE_CASE : Any = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(lowercase )]
if filetype_suffix:
SCREAMING_SNAKE_CASE : List[str] = [filename + F'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
SCREAMING_SNAKE_CASE : Optional[int] = prefix
if filetype_suffix:
filename += F'''.{filetype_suffix}'''
return [filename]
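
# A de-obfuscated sketch of the naming helpers above (the dump renames every
# function to `lowerCamelCase__`; the names below follow the identifiers the
# file itself references: camelcase_to_snakecase, filename_prefix_for_split).
# The regexes are copied from the top of the file.
import re

_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

def camelcase_to_snakecase(name: str) -> str:
    # Insert underscores at case boundaries, then lowercase: "SomeDataset" -> "some_dataset"
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()

def filename_prefix_for_split(name: str, split: str) -> str:
    # "MyDataset", "train" -> "my_dataset-train"; shard files then append
    # "-{shard_id:05d}-of-{num_shards:05d}" and the filetype suffix.
    return f"{camelcase_to_snakecase(name)}-{split}"

assert camelcase_to_snakecase("SomeDataset") == "some_dataset"
assert filename_prefix_for_split("MyDataset", "train") == "my_dataset-train"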
| 362
|
def lowerCamelCase__ ( ):
"""simple docstring"""
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
snake_case = generate_large_matrix()
snake_case = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
assert all(row == sorted(lowercase , reverse=lowercase ) for row in grid )
assert all(list(lowercase ) == sorted(lowercase , reverse=lowercase ) for col in zip(*lowercase ) )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
SCREAMING_SNAKE_CASE : List[Any] = (left + right) // 2
SCREAMING_SNAKE_CASE : Optional[int] = array[mid]
        # array[mid] is the first negative value exactly when it is negative and the element before it is non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
SCREAMING_SNAKE_CASE : List[Any] = mid + 1
else:
SCREAMING_SNAKE_CASE : Dict = mid - 1
    # No negative numbers, so return the length of the array (one past the last index).
return len(lowercase )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : List[str] = len(grid[0] )
for i in range(len(lowercase ) ):
SCREAMING_SNAKE_CASE : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase ) * len(grid[0] )) - total
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = 0
for row in grid:
for i, number in enumerate(lowercase ):
if number < 0:
total += len(lowercase ) - i
break
return total
def lowerCamelCase__ ( ):
"""simple docstring"""
from timeit import timeit
print("Running benchmarks" )
SCREAMING_SNAKE_CASE : List[str] = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
SCREAMING_SNAKE_CASE : Union[str, Any] = timeit(F'''{func}(grid=grid)''' , setup=lowercase , number=500 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
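
# A de-obfuscated sketch of the binary search benchmarked above (the setup
# string names the functions: find_negative_index, count_negatives_binary_search;
# the dump's parameter renaming breaks the body, which refers to `array`).
# Each row is sorted in decreasing order, so the index of the first negative
# value is found in O(log n) per row.
def find_negative_index(array):
    """Return the index of the first negative value in a non-increasing row."""
    if not array or array[0] < 0:
        return 0  # empty row, or every value already negative
    left, right = 0, len(array) - 1
    while right + 1 > left:
        mid = (left + right) // 2
        if array[mid] < 0 and array[mid - 1] >= 0:
            return mid  # first negative: the element before it is non-negative
        if array[mid] >= 0:
            left = mid + 1
        else:
            right = mid - 1
    return len(array)  # no negative values at all

assert find_negative_index([4, 3, 2, -1]) == 3
assert find_negative_index([1, 0]) == 2
assert find_negative_index([-1, -2]) == 0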
| 319
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def lowerCamelCase__ ( lowercase , lowercase=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
SCREAMING_SNAKE_CASE : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def lowerCamelCase__ ( lowercase , lowercase , lowercase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE : Dict = ""
else:
SCREAMING_SNAKE_CASE : Tuple = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : int = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE : Dict = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE : Dict = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = dct.pop(lowercase )
SCREAMING_SNAKE_CASE : List[Any] = val
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = DeiTConfig()
# all deit models have fine-tuned heads
SCREAMING_SNAKE_CASE : List[str] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
SCREAMING_SNAKE_CASE : Tuple = 1000
SCREAMING_SNAKE_CASE : Dict = "huggingface/label-files"
SCREAMING_SNAKE_CASE : Tuple = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = idalabel
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Union[str, Any] = int(deit_name[-6:-4] )
SCREAMING_SNAKE_CASE : int = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
SCREAMING_SNAKE_CASE : List[Any] = 192
SCREAMING_SNAKE_CASE : Union[str, Any] = 768
SCREAMING_SNAKE_CASE : Optional[int] = 12
SCREAMING_SNAKE_CASE : Tuple = 3
elif deit_name[9:].startswith("small" ):
SCREAMING_SNAKE_CASE : str = 384
SCREAMING_SNAKE_CASE : Optional[Any] = 1536
SCREAMING_SNAKE_CASE : List[str] = 12
SCREAMING_SNAKE_CASE : int = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
SCREAMING_SNAKE_CASE : int = 1024
SCREAMING_SNAKE_CASE : Any = 4096
SCREAMING_SNAKE_CASE : int = 24
SCREAMING_SNAKE_CASE : Union[str, Any] = 16
# load original model from timm
SCREAMING_SNAKE_CASE : Tuple = timm.create_model(lowercase , pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE : int = timm_model.state_dict()
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowercase , lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
read_in_q_k_v(lowercase , lowercase , lowercase )
# load HuggingFace model
SCREAMING_SNAKE_CASE : Union[str, Any] = DeiTForImageClassificationWithTeacher(lowercase ).eval()
model.load_state_dict(lowercase )
# Check outputs on an image, prepared by DeiTImageProcessor
SCREAMING_SNAKE_CASE : Union[str, Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
SCREAMING_SNAKE_CASE : Dict = DeiTImageProcessor(size=lowercase , crop_size=config.image_size )
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
SCREAMING_SNAKE_CASE : int = encoding["pixel_values"]
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowercase )
SCREAMING_SNAKE_CASE : Dict = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase , outputs.logits , atol=1E-3 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
snake_case = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 363
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
snake_case = ["""small""", """medium""", """large"""]
snake_case = """lm_head.decoder.weight"""
snake_case = """lm_head.weight"""
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = torch.load(lowercase )
SCREAMING_SNAKE_CASE : Any = d.pop(lowercase )
os.makedirs(lowercase , exist_ok=lowercase )
torch.save(lowercase , os.path.join(lowercase , lowercase ) )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
snake_case = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
snake_case = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
snake_case = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 319
| 0
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = AlbertConfig.from_json_file(lowercase )
print(F'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE : int = AlbertForPreTraining(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(lowercase , lowercase , lowercase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
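
# Example invocation of the conversion script above. The script filename and
# paths are illustrative placeholders; the flags come from the argparse
# definitions:
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin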
| 364
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
snake_case = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 319
| 0
|
from bisect import bisect
from itertools import accumulate
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(zip(lowercase , lowercase ) , key=lambda lowercase : x[0] / x[1] , reverse=lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = [i[0] for i in r], [i[1] for i in r]
SCREAMING_SNAKE_CASE : str = list(accumulate(lowercase ) )
SCREAMING_SNAKE_CASE : Optional[Any] = bisect(lowercase , lowercase )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
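
# A de-obfuscated sketch of the fractional-knapsack helper above. The dump's
# renaming mangles the sort key (it should read `key=lambda x: x[0] / x[1]`,
# i.e. sort by value-per-weight ratio, descending); the names below are
# inferred, not taken from the original source.
from bisect import bisect
from itertools import accumulate

def frac_knapsack(vl, wt, w, n):
    # Sort items by value-per-weight, take whole items while they fit,
    # then a fraction of the first item that does not fit.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )

# Capacity 15 with items (value, weight) = (60,10), (100,20), (120,30):
# take all of the first item, then 5/20 of the second -> 60 + 25 = 85.
assert frac_knapsack([60, 100, 120], [10, 20, 30], 15, 3) == 85.0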
| 365
|
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def lowerCamelCase__ ( ):
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
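
# The dump's digit-mangling collapses the two parameters of the function
# above into the same name; the intended logic gate, written out (a sketch,
# not the original source):
def or_gate(input_1: int, input_2: int) -> int:
    """OR gate: 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)

assert (or_gate(0, 0), or_gate(0, 1), or_gate(1, 0), or_gate(1, 1)) == (0, 1, 1, 1)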
| 319
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
snake_case = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ):
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 366
|
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list ):
SCREAMING_SNAKE_CASE : Union[str, Any] = set_counts
SCREAMING_SNAKE_CASE : Any = max(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = len(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [1] * num_sets
SCREAMING_SNAKE_CASE : List[str] = list(range(UpperCAmelCase_ ) )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : List[Any] = self.get_parent(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_parent(UpperCAmelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
SCREAMING_SNAKE_CASE : List[str] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Tuple = src_parent
SCREAMING_SNAKE_CASE : Optional[int] = self.set_counts[src_parent]
SCREAMING_SNAKE_CASE : Optional[Any] = max(self.max_set , UpperCAmelCase_ )
return True
def _A ( self : Tuple , UpperCAmelCase_ : int ):
if self.parents[disj_set] == disj_set:
return disj_set
SCREAMING_SNAKE_CASE : Tuple = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
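
# A runnable sketch of the union-by-rank structure above, with the obfuscated
# names (`_A`, `SCREAMING_SNAKE_CASE`) replaced by descriptive ones inferred
# from the attribute reads (`set_counts`, `ranks`, `parents`, `max_set`).
# This is an interpretation of the dump, not the original file.
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts          # size of the set rooted at i
        self.max_set = max(set_counts)        # size of the largest set so far
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets           # union-by-rank heuristic
        self.parents = list(range(num_sets))  # each node starts as its own root

    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        # Attach the lower-rank root under the higher-rank one.
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined)
        return True

    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])  # path compression
        return self.parents[disj_set]

sets = DisjointSet([1, 1, 1])
assert sets.merge(0, 1) and sets.merge(1, 2) and not sets.merge(0, 2)
assert sets.max_set == 3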
| 319
| 0
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
    def __init__( self : int , UpperCAmelCase_ : CLIPSegForImageSegmentation , UpperCAmelCase_ : CLIPSegProcessor , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : UNet2DConditionModel , UpperCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase_ : StableDiffusionSafetyChecker , UpperCAmelCase_ : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
SCREAMING_SNAKE_CASE : Any = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = dict(scheduler.config )
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : Optional[int] = FrozenDict(UpperCAmelCase_ )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
SCREAMING_SNAKE_CASE : str = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = dict(scheduler.config )
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : str = FrozenDict(UpperCAmelCase_ )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=UpperCAmelCase_ , segmentation_processor=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , )
def _A ( self : Optional[Any] , UpperCAmelCase_ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_ )
def _A ( self : Tuple ):
self.enable_attention_slicing(UpperCAmelCase_ )
def _A ( self : Any ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
SCREAMING_SNAKE_CASE : Optional[int] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase_ , UpperCAmelCase_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _A ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Dict , ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
SCREAMING_SNAKE_CASE : int = self.segmentation_model(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_to_pil(UpperCAmelCase_ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , )
| 367
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = '''timm_backbone'''
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] , ):
super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = backbone
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = features_only
SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : List[Any] = out_indices if out_indices is not None else (-1,)
| 319
| 0
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
snake_case = logging.get_logger(__name__)
snake_case = {}
snake_case = {}
snake_case = {}
def lowerCamelCase__ ( lowercase , lowercase , lowercase = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
SCREAMING_SNAKE_CASE : List[str] = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
SCREAMING_SNAKE_CASE : str = format_type
def lowerCamelCase__ ( lowercase , lowercase , lowercase = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
SCREAMING_SNAKE_CASE : Dict = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
snake_case = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
snake_case = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
snake_case = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def lowerCamelCase__ ( lowercase , **lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = get_format_type_from_alias(lowercase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**lowercase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
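
# A short usage sketch of the registry above. The dump obfuscates the public
# function names; in the `datasets` library the lookup entry point is
# `get_formatter` (an assumption based on the module, not on this dump), and
# aliases resolve before the lookup, so "np" and "numpy" return the same class:
from datasets.formatting import get_formatter

fmt = get_formatter("np")  # alias resolves to "numpy" before lookup
print(type(fmt).__name__)  # -> "NumpyFormatter"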
| 368
|
from math import sqrt
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 0
for i in range(1 , int(sqrt(lowercase ) + 1 ) ):
if n % i == 0 and i != sqrt(lowercase ):
total += i + n // i
elif i == sqrt(lowercase ):
total += i
return total - n
def lowerCamelCase__ ( lowercase = 10000 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = sum(
i
for i in range(1 , lowercase )
if sum_of_divisors(sum_of_divisors(lowercase ) ) == i and sum_of_divisors(lowercase ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
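
# A de-obfuscated sketch of the helper above (Project Euler 21, amicable
# pairs). sum_of_divisors(n) is the sum of the proper divisors of n; 220 and
# 284 form the classic amicable pair, so both contribute to the total below
# 10000: d(220) = 1+2+4+5+10+11+20+22+44+55+110 = 284 and d(284) = 220.
from math import sqrt

def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i   # count both divisors of the pair (i, n // i)
        elif i == sqrt(n):
            total += i            # perfect square: count the root only once
    return total - n              # exclude n itself

assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220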
| 319
| 0
|