from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    """Builds a small random DistilBERT config plus dummy inputs for the common TF model tests."""

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        # The model should also accept positional (list) inputs.
        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
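
# Added usage sketch (not part of the upstream test file): the integration test
# above reduces to this standalone forward pass. Assumes TensorFlow, transformers,
# and network access to the "distilbert-base-uncased" checkpoint.
import tensorflow as tf
from transformers import TFDistilBertModel

model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
outputs = model(tf.constant([[0, 1, 2, 3, 4, 5]]))
print(outputs.last_hidden_state.shape)  # (1, 6, 768)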
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Denoising diffusion probabilistic models (DDPM) scheduler, stateless Flax version."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(self, state, sample, timestep=None):
        return sample

    def set_timesteps(self, state, num_inference_steps, shape=()):
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state,
        model_output,
        timestep,
        sample,
        key=None,
        return_dict=True,
    ):
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
                " `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state, sample, noise, timesteps):
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
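
# Added usage sketch: how the stateless Flax scheduler API above is driven in a
# sampling loop. `unet_apply` and the latent shape are hypothetical placeholders,
# not part of the scheduler file.
import jax

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 3, 64, 64))  # hypothetical latent shape
for t in state.timesteps:
    model_output = unet_apply(sample, t)  # hypothetical denoising model
    sample, state = scheduler.step(state, model_output, t, sample, key=key, return_dict=False)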
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
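
# Added sketch: constructing the config above with its defaults; the printed
# values come straight from the __init__ defaults in this file.
config = MarianConfig()
print(config.d_model)              # 1024
print(config.encoder_layers)       # 12
print(config.num_attention_heads)  # 16 (mapped to encoder_attention_heads via attribute_map)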
"""Processor class for Donut."""
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON-like structure."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
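
# Added illustration of the token2json parsing above: nested <s_*>...</s_*> tags
# become nested dicts and <sep/> separates list items. The example string is made
# up; the expected output is traced from the parsing logic, not from docs.
# Given a loaded `processor`:
#   processor.token2json("<s_menu><s_name>latte</s_name><sep/><s_name>mocha</s_name></s_menu>")
#   -> {"menu": [{"name": "latte"}, {"name": "mocha"}]}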
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
import math


def proth(number: int) -> int:
    """Return the nth Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # +1 for binary starting at 0 (2^0, 2^1, ...), +1 to start the sequence at the 3rd Proth number
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak a fairseq SEW checkpoint into the transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
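
# Added example invocation (hedged: the script filename and all paths are
# placeholders; the flags match the argparse definitions above):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path ./dict.ltr.txt \
#       --is_finetuned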
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
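# Example invocation (illustrative only -- the script name and paths below are
# placeholders, not taken from this file):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5.ckpt --original_config_file ./v1-inference.yaml \
#       --dump_path ./stable-diffusion-v1-5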
| 115
|
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the initial one-lane highway, placing a car every `frequency` cells."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list, car_index: int) -> int:
    """Number of empty cells between a car and the next car ahead of it."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)
def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Compute the next speed of every car on the highway."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    """Run the simulation for `number_of_update` steps, appending each new state."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
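# Illustrative usage of the Nagel-Schreckenberg helpers above (a sketch, not
# part of the original module; the argument values are arbitrary assumptions):
#
#     highway = construct_highway(number_of_cells=100, frequency=20, initial_speed=1)
#     states = simulate(highway, number_of_update=5, probability=0.1, max_speed=5)
#     # `states` now holds 6 rows: the initial highway plus one row per update.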
| 115
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
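# A minimal usage sketch for the task template above (illustrative only; the
# feature and class names are assumptions, not part of the original file):
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification().align_with_features(features)
#     # task.label_schema["labels"] is now the two-class ClassLabel from `features`.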
| 44
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Sigmoid activation: maps every value into the interval (0, 1)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid linear unit (SiLU/swish) activation: vector * sigmoid(vector)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
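# Quick numeric sanity check for the two activations above (illustrative, values
# exact at zero): sigmoid(0) = 0.5 and sigmoid_linear_unit(0) = 0.0, while for
# large positive inputs the SiLU output approaches the input itself.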
| 86
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 368
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


# We will verify the conversion on this COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"""
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(f"""Processor successfully saved at {pytorch_dump_path}""")

    if push_to_hub:
        print("""Pushing model to the hub...""")
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message="""Add model""", use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""", commit_message="""Add image processor""", use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 141
| 0
|
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of a given credit card number."""
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))


def luhn_validation(credit_card_number: str) -> bool:
    """Validate a given credit card number with the Luhn algorithm."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print whether the given credit card number is valid and why."""
    error_message = F"""{credit_card_number} is an invalid credit card number because"""
    if not credit_card_number.isdigit():
        print(F"""{error_message} it has nonnumerical characters.""")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(F"""{error_message} of its length.""")
        return False
    if not validate_initial_digits(credit_card_number):
        print(F"""{error_message} of its first two digits.""")
        return False
    if not luhn_validation(credit_card_number):
        print(F"""{error_message} it fails the Luhn check.""")
        return False
    print(F"""{credit_card_number} is a valid credit card number.""")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 174
|
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 6
| 0
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator('Something there')
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))

        outputs = generator(['This is great !', 'Something else'], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
            ],
        )

        outputs = generator(
            ['This is great !', 'Something else'], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='pt')
        # do_sample=False necessary for reproducibility
        outputs = generator('Something there', do_sample=False)
        self.assertEqual(outputs, [{'generated_text': ''}])

        num_return_sequences = 3
        outputs = generator(
            'Something there', num_return_sequences=num_return_sequences, num_beams=num_return_sequences,
        )
        target_outputs = [
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': ''},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator('This is a test', do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {'generated_token_ids': ANY(torch.Tensor)},
                {'generated_token_ids': ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = '<pad>'
        outputs = generator(
            ['This is a test', 'This is a second test'], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {'generated_token_ids': ANY(torch.Tensor)},
                    {'generated_token_ids': ANY(torch.Tensor)},
                ],
                [
                    {'generated_token_ids': ANY(torch.Tensor)},
                    {'generated_token_ids': ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='tf')
        # do_sample=False necessary for reproducibility
        outputs = generator('Something there', do_sample=False)
        self.assertEqual(outputs, [{'generated_text': ''}])
| 367
|
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r'([A-Z]+)([A-Z][a-z])')
_lowercase_uppercase_re = re.compile(r'([a-z\d])([A-Z])')

_single_underscore_re = re.compile(r'(?<!_)_(?!_)')
_multiple_underscores_re = re.compile(r'(_{2,})')

_split_re = r'^\w+(\.\w+)*$'

INVALID_WINDOWS_CHARACTERS_IN_PATH = r'<>:/\|?*'
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r'\1_\2', name)
    name = _lowercase_uppercase_re.sub(r'\1_\2', name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != '')


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''')
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''')
    if not re.match(_split_re, split):
        raise ValueError(F'''Split name should match '{_split_re}' but got '{split}'.''')
    return F'''{filename_prefix_for_name(name)}-{split}'''


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir, prefix)
    return F'''{filepath}*'''


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F'''.{filetype_suffix}'''
        return [filename]
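# Example of the naming helpers above (illustrative; the dataset name, path and
# shard lengths are assumptions): filenames_for_dataset_split(
#     "/data", "MyDataset", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
# returns ["/data/my_dataset-train-00000-of-00002.arrow",
#          "/data/my_dataset-train-00001-of-00002.arrow"].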
| 162
| 0
|
def heaps(arr: list) -> list:
    """Heap's algorithm: return a list of all permutations of `arr`,
    generating each permutation from the previous one by a single swap."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
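# Example (illustrative): heaps([1, 2, 3]) returns all 3! = 6 permutations, in
# Heap's order: [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)].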
| 18
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 240
| 0
|
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected) == sorted(result)
| 370
|
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
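# Quick check of the arithmetic for n = 10: the sum of squares is 385 and the
# square of the sum is 55**2 = 3025, so solution(10) should return
# 3025 - 385 = 2640.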
| 79
| 0
|
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string"""), """numbers""": datasets.Value("""float32""")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, """dataset.arrow"""), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["""text"""])

        times["""map identity"""] = map(dataset)

        times["""map identity batched"""] = map(dataset, batched=True)

        times["""map no-op batched"""] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="""numpy"""):
            times["""map no-op batched numpy"""] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="""pandas"""):
            times["""map no-op batched pandas"""] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="""torch""", columns="""numbers"""):
            times["""map no-op batched pytorch"""] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="""tensorflow""", columns="""numbers"""):
            times["""map no-op batched tensorflow"""] = map(dataset, function=lambda x: None, batched=True)

        times["""map fast-tokenizer batched"""] = map(dataset, function=tokenize, batched=True)

        times["""filter"""] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, """wb""") as f:
        f.write(json.dumps(times).encode("""utf-8"""))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 115
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 115
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
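# Hypothetical usage sketch for the tool above (not part of the original file;
# the image path and the sample caption are assumptions for illustration):
#
#     from PIL import Image
#     tool = ImageCaptioningTool()
#     caption = tool(Image.open("photo.jpg"))  # e.g. "a dog sitting on the grass"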
| 350
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    model_type = """roberta"""

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 241
| 0
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version('''<''', '''2.0.0''') or not hasattr(torch, '''_dynamo'''):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, compiled wrappers)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, '''forward''')
        original_forward = model.__dict__.pop('''_original_forward''', None)
        if original_forward is not None:
            while hasattr(forward, '''__wrapped__'''):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, '''_converted_to_transformer_engine''', False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (keys upper-cased) inside a `with` block."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    if not hasattr(obj, '''__qualname__''') and not hasattr(obj, '''__name__'''):
        obj = getattr(obj, '''__class__''', obj)
    if hasattr(obj, '''__qualname__'''):
        return obj.__qualname__
    if hasattr(obj, '''__name__'''):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('''localhost''', port)) == 0
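# Illustrative use of patch_environment above (a sketch; the variable name and
# value are arbitrary assumptions):
#
#     with patch_environment(master_port="29501"):
#         ...  # os.environ["MASTER_PORT"] == "29501" only inside this block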
| 103
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1E-2)
| 141
| 0
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, '''README.md''')
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 235_1563,
'''num_examples''': 1_0000,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_8418,
'''num_examples''': 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos['''default'''], key), getattr(expected_dataset_infos['''default'''], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
else:
result == expected
| 359
|
from pathlib import Path
import fire
from tqdm import tqdm
def UpperCamelCase ( lowerCAmelCase__="ro" , lowerCAmelCase__="en" , lowerCAmelCase__="wmt16" , lowerCAmelCase__=None ):
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
lowercase = f'{src_lang}-{tgt_lang}'
print(f'Converting {dataset}-{pair}' )
lowercase = datasets.load_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
if save_dir is None:
lowercase = f'{dataset}-{pair}'
lowercase = Path(lowerCAmelCase__ )
save_dir.mkdir(exist_ok=lowerCAmelCase__ )
for split in ds.keys():
print(f'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
lowercase = '''val''' if split == '''validation''' else split
lowercase = save_dir.joinpath(f'{fn}.source' )
lowercase = save_dir.joinpath(f'{fn}.target' )
lowercase = src_path.open('''w+''' )
lowercase = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
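# Example CLI invocation via python-fire (illustrative; the script filename is a
# placeholder, not taken from this file):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16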
| 97
| 0
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [''' '''.join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 179
|
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 162
| 0
|
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, """vision""")
        requires_backends(self, """torch""")

        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["""points_per_batch"""] = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["""points_per_crop"""] = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["""crops_n_layers"""] = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["""crop_overlap_ratio"""] = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["""crop_n_points_downscale_factor"""] = kwargs["""crop_n_points_downscale_factor"""]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["""pred_iou_thresh"""] = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            forward_params["""stability_score_offset"""] = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            forward_params["""mask_threshold"""] = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            forward_params["""stability_score_thresh"""] = kwargs["""stability_score_thresh"""]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["""crops_nms_thresh"""] = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["""output_rle_mask"""] = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["""output_bboxes_mask"""] = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 5_12 / 15_00,
        points_per_crop: int = 32,
        crop_n_points_downscale_factor: int = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["""longest_edge"""]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="""pt""")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("""pixel_values"""))
                    model_inputs["""image_embeddings"""] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""")

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
def lowerCAmelCase ( self : List[Any] , __snake_case : List[str] , __snake_case : Optional[Any]=0.88 , __snake_case : List[str]=0.95 , __snake_case : int=0 , __snake_case : List[str]=1 , )-> Tuple:
snake_case = model_inputs.pop("""input_boxes""" )
snake_case = model_inputs.pop("""is_last""" )
snake_case = model_inputs.pop("""original_sizes""" ).tolist()
snake_case = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
snake_case = self.model(**__lowerCamelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
snake_case = model_outputs["""pred_masks"""]
snake_case = self.image_processor.post_process_masks(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , binarize=__lowerCamelCase )
snake_case = model_outputs["""iou_scores"""]
snake_case , snake_case , snake_case = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowerCAmelCase ( self : Tuple , __snake_case : int , __snake_case : Optional[Any]=False , __snake_case : Any=False , __snake_case : Optional[int]=0.7 , )-> int:
snake_case = []
snake_case = []
snake_case = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
snake_case = torch.cat(__lowerCamelCase )
snake_case = torch.cat(__lowerCamelCase )
snake_case , snake_case , snake_case , snake_case = self.image_processor.post_process_for_mask_generation(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
snake_case = defaultdict(__lowerCamelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__lowerCamelCase )
snake_case = {}
if output_rle_mask:
snake_case = rle_mask
if output_bboxes_mask:
snake_case = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
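# Minimal usage sketch (the model id is illustrative; any SAM checkpoint works the same way):
#
#   from transformers import pipeline
#
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=0)
#   outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64)
#   masks = outputs["masks"]  # list of binary masks, scored by outputs["scores"]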
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22PriorEmb2EmbPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = KandinskyV22ControlnetImg2ImgPipeline
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case_ = False
@property
def lowerCAmelCase ( self : Dict )-> str:
return 32
@property
def lowerCAmelCase ( self : int )-> List[str]:
return 32
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return self.time_input_dim
@property
def lowerCAmelCase ( self : Optional[Any] )-> Any:
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : str )-> Union[str, Any]:
return 1_00
@property
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
torch.manual_seed(0 )
snake_case = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case = UNet2DConditionModel(**__snake_case )
return model
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : str )-> List[str]:
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : int )-> Dict:
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple=0 )-> List[Any]:
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case = Image.fromarray(np.uint8(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create hint
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith("""mps""" ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case = """cpu"""
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case = init_image.resize((5_12, 5_12) )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case = torch.from_numpy(np.array(__snake_case ) ).float() / 2_55.0
snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case = """A robot, 4k photo"""
snake_case = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
pipe_prior.to(__snake_case )
snake_case = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.float16 )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt="""""" , ).to_tuple()
snake_case = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')


if __name__ == "__main__":
    main()
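# Example invocation (script name and output paths are illustrative):
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set output/biencoder-nq-dev.questions \
#       --gold_data_path output/biencoder-nq-dev.pages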
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
lowerCamelCase_ = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
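# `entropy` computes the Shannon entropy H(p) = -sum_i p_i * log(p_i) over the last axis;
# with unlogit=True the rows are first squared to turn them into positive weights.
# For example, a uniform attention row p = [0.25, 0.25, 0.25, 0.25] gives
# H(p) = log(4) ~= 1.386, the maximum attainable over four options.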
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logger line per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
'''simple docstring'''
_A , _A = model.config.num_hidden_layers, model.config.num_attention_heads
_A = torch.zeros(__lowercase , __lowercase ).to(args.device )
_A = torch.zeros(__lowercase , __lowercase ).to(args.device )
if head_mask is None:
_A = torch.ones(__lowercase , __lowercase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowercase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_A = None
_A = 0.0
_A = 0.0
for step, inputs in enumerate(tqdm(__lowercase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
_A = tuple(t.to(args.device ) for t in inputs )
((_A) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_A = model(__lowercase , labels=__lowercase , head_mask=__lowercase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_A , _A , _A = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowercase ):
_A = entropy(attn.detach() , __lowercase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowercase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_A = 2
_A = torch.pow(torch.pow(__lowercase , __lowercase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
_A = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
'''simple docstring'''
_A , _A , _A = compute_heads_importance(__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase )
_A = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , __lowercase , original_score * args.masking_threshold )
_A = torch.ones_like(__lowercase )
_A = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_A = original_score
while current_score >= original_score * args.masking_threshold:
_A = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_A = float("Inf" )
_A = head_importance.view(-1 ).sort()[1]
if len(__lowercase ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
_A = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
_A = new_head_mask.view(-1 )
_A = 0.0
_A = new_head_mask.view_as(__lowercase )
_A = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)
# Compute metric and head importance again
_A , _A , _A = compute_heads_importance(
__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , head_mask=__lowercase )
_A = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , __lowercase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
    print_2d_tensor(head_mask)
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
'''simple docstring'''
_A = datetime.now()
_A , _A , _A = compute_heads_importance(
__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase )
_A = 1 / loss
_A = datetime.now() - before_time
_A = sum(p.numel() for p in model.parameters() )
_A = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowercase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowercase , __lowercase ):
_A = [
v,
]
assert sum(len(__lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowercase )
_A = sum(p.numel() for p in model.parameters() )
_A = datetime.now()
_A , _A , _A = compute_heads_importance(
__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase , actually_pruned=__lowercase , )
_A = 1 / loss
_A = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , __lowercase , __lowercase , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , __lowercase , __lowercase )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(__lowercase , args.output_dir )
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=__lowercase , type=__lowercase , required=__lowercase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=__lowercase , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=__lowercase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=__lowercase , type=__lowercase , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=__lowercase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=__lowercase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=__lowercase , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=__lowercase , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=__lowercase , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=__lowercase , help="Batch size." )
parser.add_argument("--seed" , type=__lowercase , default=42 )
parser.add_argument("--local_rank" , type=__lowercase , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging." )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowercase )
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
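# Example invocation (script name and paths are illustrative):
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir data/tokens.txt \
#       --output_dir output/ \
#       --try_masking --masking_threshold 0.9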
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "spiece.model"}
_snake_case = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
_snake_case = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
_snake_case = 0
_snake_case = 1
_snake_case = 2
_snake_case = 3
_snake_case = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
def __init__( self , _a , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , _a = None , **_a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_A : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_A : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_A : Tuple = 3
_A : Tuple = do_lower_case
_A : Union[str, Any] = remove_space
_A : Union[str, Any] = keep_accents
_A : Optional[int] = vocab_file
_A : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def a__ ( self ) -> str:
return len(self.sp_model )
def a__ ( self ) -> List[Any]:
_A : int = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
_A : Union[str, Any] = self.__dict__.copy()
_A : Tuple = None
return state
def __setstate__( self , _a ) -> Dict:
_A : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_A : List[Any] = {}
_A : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ ( self , _a ) -> Optional[Any]:
if self.remove_space:
_A : Tuple = """ """.join(inputs.strip().split() )
else:
_A : List[str] = inputs
_A : Dict = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_A : Optional[Any] = unicodedata.normalize("""NFKD""" , _a )
_A : int = """""".join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
_A : Union[str, Any] = outputs.lower()
return outputs
def a__ ( self , _a ) -> List[str]:
_A : Optional[int] = self.preprocess_text(_a )
_A : Union[str, Any] = self.sp_model.encode(_a , out_type=_a )
_A : int = []
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_A : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_A : List[Any] = cur_pieces[1:]
else:
_A : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def a__ ( self , _a ) -> Dict:
return self.sp_model.PieceToId(_a )
def a__ ( self , _a ) -> int:
return self.sp_model.IdToPiece(_a )
def a__ ( self , _a ) -> Optional[int]:
_A : List[Any] = """""".join(_a ).replace(_a , """ """ ).strip()
return out_string
def a__ ( self , _a , _a = False , _a = None , _a = True , **_a , ) -> str:
_A : Any = kwargs.pop("""use_source_tokenizer""" , _a )
_A : Tuple = self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_A : int = []
_A : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
_A : Dict = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_A : Tuple = """""".join(_a )
_A : List[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_A : Tuple = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Tuple = [self.sep_token_id]
_A : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
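    # Resulting layout: a single sequence is packed as ``A <sep> <cls>`` and a pair as
    # ``A <sep> B <sep> <cls>`` -- XLNet puts the classification token last, unlike BERT.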
def a__ ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1]
return ([0] * len(_a )) + [1, 1]
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Union[str, Any] = [self.sep_token_id]
_A : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : List[Any] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , """wb""" ) as fi:
_A : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
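# Shape walkthrough (illustrative numbers): for a checkpoint_version >= 2.0 QKV weight with
# num_heads=16, hidden_size=64 and num_splits=3, the incoming [3072, D] tensor is viewed as
# [16, 3, 64, D], transposed to [3, 16, 64, D], then flattened back to [3072, D], making the
# Q/K/V blocks contiguous in the order HuggingFace GPT-2 expects.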
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
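# Example usage (paths are illustrative):
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       path/to/checkpoint.zip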
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int ):
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
lowerCAmelCase : Optional[Any] = ksize + 1
lowerCAmelCase : List[Any] = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(_snake_case ):
for x in range(_snake_case ):
# distance from center
lowerCAmelCase : Any = x - ksize // 2
lowerCAmelCase : Any = y - ksize // 2
# degree to radiant
lowerCAmelCase : Any = theta / 180 * np.pi
lowerCAmelCase : Tuple = np.cos(_theta )
lowerCAmelCase : Dict = np.sin(_theta )
# get kernel x
lowerCAmelCase : str = cos_theta * px + sin_theta * py
# get kernel y
lowerCAmelCase : List[Any] = -sin_theta * px + cos_theta * py
# fill kernel
lowerCAmelCase : Tuple = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
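# The kernel value at (x, y) follows the real Gabor function
#   g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# with rotated coordinates x' = x*cos(theta) + y*sin(theta) and y' = -x*sin(theta) + y*cos(theta).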
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
snake_case__ : List[str] = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
snake_case__ : Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
snake_case__ : Any = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
snake_case__ : Any = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
snake_case__ : Tuple = out / out.max() * 255
snake_case__ : Optional[Any] = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : Union[str, Any] = ["""flax"""]
def __init__( self : Dict , *a_ : Optional[Any] , **a_ : List[str] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Union[str, Any] , **a_ : Optional[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : Union[str, Any] , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : List[Any] = ["""flax"""]
def __init__( self : Dict , *a_ : Optional[Any] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : Union[str, Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : Dict = ["""flax"""]
def __init__( self : Any , *a_ : Optional[int] , **a_ : str ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Tuple , **a_ : Dict ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Union[str, Any] , *a_ : Any , **a_ : Union[str, Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : Optional[Any] = ["""flax"""]
def __init__( self : str , *a_ : Optional[int] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Dict , **a_ : str ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Optional[int] , **a_ : List[str] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : Optional[Any] = ["""flax"""]
def __init__( self : Optional[Any] , *a_ : Optional[Any] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : Union[str, Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : List[Any] = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : Dict , **a_ : Any ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Tuple , *a_ : Optional[Any] , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[int] , *a_ : List[Any] , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : List[str] = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : str , **a_ : Any ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Any , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Optional[int] , **a_ : str ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : int = ["""flax"""]
def __init__( self : Dict , *a_ : str , **a_ : int ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : List[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : List[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : Tuple = ["""flax"""]
def __init__( self : Any , *a_ : Any , **a_ : int ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Tuple , **a_ : Optional[int] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Dict , **a_ : Dict ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : Any = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : Any , **a_ : List[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : List[Any] , **a_ : Optional[int] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : List[Any] , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : Tuple = ["""flax"""]
def __init__( self : Tuple , *a_ : Optional[int] , **a_ : Union[str, Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : List[str] , **a_ : Optional[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Union[str, Any] , *a_ : Any , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : List[str] = ["""flax"""]
def __init__( self : Optional[Any] , *a_ : Optional[Any] , **a_ : Dict ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : int , **a_ : List[str] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : int , **a_ : str ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ : Any = ["""flax"""]
def __init__( self : List[str] , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : Optional[int] , **a_ : Dict ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : Union[str, Any] , **a_ : Union[str, Any] ):
requires_backends(cls , ["flax"] )
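# Each class above is a placeholder generated for environments without Flax: the DummyObject
# metaclass routes instantiation and classmethod access through `requires_backends`, which
# raises an error telling the user to install flax. Illustrative behavior (the class name is
# hypothetical; the real list depends on the library version):
#
#   model = FlaxUNet2DConditionModel()  # raises: ... requires the flax library ...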
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = 1_0
_UpperCAmelCase = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_UpperCAmelCase = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(__lowerCAmelCase ) ),
} , features=__lowerCAmelCase , )
return dataset
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: List[str] , a__: str ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__lowerCAmelCase )
return filename
# FILE_CONTENT + files
lowerCAmelCase__ :List[str] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Tuple ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''file.txt'''
_UpperCAmelCase = FILE_CONTENT
with open(__lowerCAmelCase , 'w' ) as f:
f.write(__lowerCAmelCase )
return filename
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: List[str] ) -> Optional[int]:
'''simple docstring'''
import bz2
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''file.txt.bz2'''
_UpperCAmelCase = bytes(__lowerCAmelCase , 'utf-8' )
with bz2.open(__lowerCAmelCase , 'wb' ) as f:
f.write(__lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> Dict:
'''simple docstring'''
import gzip
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_UpperCAmelCase = bytes(__lowerCAmelCase , 'utf-8' )
with gzip.open(__lowerCAmelCase , 'wb' ) as f:
f.write(__lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: int ) -> int:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''file.txt.lz4'''
_UpperCAmelCase = bytes(__lowerCAmelCase , 'utf-8' )
with lz4.frame.open(__lowerCAmelCase , 'wb' ) as f:
f.write(__lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: int , a__: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''file.txt.7z'''
with py7zr.SevenZipFile(__lowerCAmelCase , 'w' ) as archive:
archive.write(__lowerCAmelCase , arcname=os.path.basename(__lowerCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Any , a__: List[str] ) -> Optional[Any]:
'''simple docstring'''
import tarfile
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''file.txt.tar'''
with tarfile.TarFile(__lowerCAmelCase , 'w' ) as f:
f.add(__lowerCAmelCase , arcname=os.path.basename(__lowerCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Optional[Any] ) -> str:
'''simple docstring'''
import lzma
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''file.txt.xz'''
_UpperCAmelCase = bytes(__lowerCAmelCase , 'utf-8' )
with lzma.open(__lowerCAmelCase , 'wb' ) as f:
f.write(__lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Optional[int] , a__: Optional[int] ) -> List[str]:
'''simple docstring'''
import zipfile
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''file.txt.zip'''
with zipfile.ZipFile(__lowerCAmelCase , 'w' ) as f:
f.write(__lowerCAmelCase , arcname=os.path.basename(__lowerCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Tuple ) -> int:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''file.txt.zst'''
_UpperCAmelCase = bytes(__lowerCAmelCase , 'utf-8' )
with zstd.open(__lowerCAmelCase , 'wb' ) as f:
f.write(__lowerCAmelCase )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Any ) -> int:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''file.xml'''
_UpperCAmelCase = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__lowerCAmelCase , 'w' ) as f:
f.write(__lowerCAmelCase )
return filename
lowerCAmelCase__ :List[str] = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
lowerCAmelCase__ :List[Any] = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
lowerCAmelCase__ :List[Any] = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase__ :Tuple = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
lowerCAmelCase__ :str = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( ) -> int:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: int ) -> Tuple:
    '''simple docstring'''
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS )
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
    dataset.map(cache_file_name=path )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: int ) -> Dict:
    '''simple docstring'''
    import sqlite3
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Optional[int] ) -> List[str]:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
    with open(path , 'w' , newline='' ) as f:
        writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Optional[Any] ) -> int:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
    with open(path , 'w' , newline='' ) as f:
        writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( csv_path: str , tmp_path_factory: int ) -> Union[str, Any]:
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
    with open(csv_path , 'rb' ) as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( csv_path: Any , csv2_path: List[Any] , tmp_path_factory: Any ) -> Tuple:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path ) )
        f.write(csv2_path , arcname=os.path.basename(csv2_path ) )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( csv_path: Tuple , csv2_path: Optional[int] , tmp_path_factory: Optional[int] ) -> Optional[int]:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
        f.write(csv2_path , arcname=os.path.basename(csv2_path.replace('.csv' , '.CSV' ) ) )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( csv_path: Union[str, Any] , csv2_path: List[str] , tmp_path_factory: Dict ) -> str:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(csv_path , arcname=os.path.join('main_dir' , os.path.basename(csv_path ) ) )
        f.write(csv2_path , arcname=os.path.join('main_dir' , os.path.basename(csv2_path ) ) )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: List[str] ) -> List[Any]:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        } )
    with open(path , 'wb' ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Optional[int] ) -> Optional[int]:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    data = {'data': DATA}
    with open(path , 'w' ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Optional[int] ) -> str:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path , 'w' ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Optional[int] ) -> int:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '\n' )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: List[str] ) -> Optional[Any]:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '\n' )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Optional[Any] ) -> Dict:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA_312:
            f.write(json.dumps(item ) + '\n' )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Union[str, Any] ) -> Any:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA_STR:
            f.write(json.dumps(item ) + '\n' )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Optional[int] , text_path: Dict ) -> int:
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
    with open(text_path , 'rb' ) as orig_file:
        with gzip.open(path , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Dict , jsonl_path: Optional[Any] ) -> List[Any]:
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
    with open(jsonl_path , 'rb' ) as orig_file:
        with gzip.open(path , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( jsonl_path: str , jsonl2_path: Any , tmp_path_factory: List[str] ) -> Optional[int]:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.write(jsonl2_path , arcname=os.path.basename(jsonl2_path ) )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: str , a__: Any , a__: Dict , a__: int ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(__lowerCAmelCase , 'w' ) as f:
f.write(__lowerCAmelCase , arcname=os.path.join('nested' , os.path.basename(__lowerCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( jsonl_path: Tuple , jsonl2_path: Any , tmp_path_factory: List[Any] ) -> Tuple:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(jsonl_path , arcname=os.path.join('main_dir' , os.path.basename(jsonl_path ) ) )
        f.write(jsonl2_path , arcname=os.path.join('main_dir' , os.path.basename(jsonl2_path ) ) )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( jsonl_path: Union[str, Any] , jsonl2_path: Optional[Any] , tmp_path_factory: List[str] ) -> List[str]:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.add(jsonl2_path , arcname=os.path.basename(jsonl2_path ) )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Any , a__: Optional[int] , a__: Tuple , a__: Any ) -> Any:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(__lowerCAmelCase , 'w' ) as f:
f.add(__lowerCAmelCase , arcname=os.path.join('nested' , os.path.basename(__lowerCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: str ) -> Optional[int]:
    '''simple docstring'''
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Tuple ) -> int:
    '''simple docstring'''
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Union[str, Any] ) -> List[Any]:
    '''simple docstring'''
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( text_path: Tuple , text2_path: Union[str, Any] , tmp_path_factory: List[str] ) -> Dict:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.basename(text_path ) )
        f.write(text2_path , arcname=os.path.basename(text2_path ) )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( text_path: List[str] , text2_path: str , tmp_path_factory: Optional[Any] ) -> Dict:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.join('main_dir' , os.path.basename(text_path ) ) )
        f.write(text2_path , arcname=os.path.join('main_dir' , os.path.basename(text2_path ) ) )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( text_path: List[Any] , text2_path: int , tmp_path_factory: Tuple ) -> Tuple:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.basename('unsupported.ext' ) )
        f.write(text2_path , arcname=os.path.basename('unsupported_2.ext' ) )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Optional[int] ) -> Union[str, Any]:
    '''simple docstring'''
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(text )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( ) -> Dict:
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( image_file: Dict , tmp_path_factory: Any ) -> Optional[int]:
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(image_file , arcname=os.path.basename(image_file ) )
        f.write(image_file , arcname=os.path.basename(image_file ).replace('.jpg' , '2.jpg' ) )
    return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( tmp_path_factory: Optional[int] ) -> Union[str, Any]:
    '''simple docstring'''
    data_dir = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
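# A minimal, hypothetical usage sketch of these fixtures (the test name and
# the `csv_path` fixture wiring are assumptions, not part of this file):
#
#     def test_csv_roundtrip(csv_path):
#         import csv as csv_module
#         with open(csv_path, newline='') as f:
#             rows = list(csv_module.DictReader(f))
#         assert [row['col_1'] for row in rows] == ['0', '1', '2', '3']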
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
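# Note: `_LazyModule` defers the heavy framework imports above until an
# attribute (e.g. `XGLMModel`) is first accessed, so importing this package
# stays cheap even when torch, flax, and tensorflow are all installed.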
def odd_even_transposition(arr: list) -> list:
    """Sorts a list using odd-even transposition sort (alternating even/odd passes)."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
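# Like bubble sort this is O(n^2) overall, but within one pass the even/odd
# comparisons touch disjoint pairs, which is what makes the parallel (sorting
# network) variant of odd-even transposition sort possible.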
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimates pi by sampling uniform points in the square [-1, 1] x [-1, 1]."""
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The exact value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate)}')
def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0, ):
    """Monte Carlo estimate of the integral of function_to_integrate over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    """Checks the estimator against the closed-form area under y=x."""
    def identity_function(x: float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print('******************')
def pi_estimator_using_area_under_curve(iterations: int):
    """Estimates pi as the area under y = sqrt(4 - x^2) on [0, 2] (a quarter circle of radius 2)."""
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
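# A minimal sketch of calling the estimators directly (sample counts are
# arbitrary; the Monte Carlo error shrinks roughly as 1/sqrt(iterations)):
#
#     pi_estimator(100_000)
#     area_under_line_estimator_check(100_000, 0.0, 1.0)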
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
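# A minimal usage sketch (untested; values shown are the defaults above):
#
#     config = TrajectoryTransformerConfig()
#     assert config.hidden_size == config.n_embd  # resolved via attribute_map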
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """simple docstring"""
    octets = [int(i) for i in ip_v4_address.split('.') if i.isdigit()]
    # A valid dotted quad has exactly four octets, each in the range 0..255.
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_v4_address_valid(ip) else 'invalid'
    print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
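# Examples:
#     is_ip_v4_address_valid("192.168.0.23")  # True
#     is_ip_v4_address_valid("256.1.1.1")     # False -- 256 is out of the 0..255 octet range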
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap( checkpoint_path , enable_fusion=False ):
    '''simple docstring'''
    model, model_cfg = create_model(
        '''HTSAT-tiny''' , '''roberta''' , checkpoint_path , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=enable_fusion , fusion_type='''aff_2d''' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict( state_dict ):
    '''simple docstring'''
    model_state_dict = {}
    sequential_layers_pattern = R'''.*sequential.(\d+).*'''
    text_projection_pattern = R'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(sequential_layer )//3}.linear.' )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace('qkv' , 'query' )] = query_layer
            model_state_dict[key.replace('qkv' , 'key' )] = key_layer
            model_state_dict[key.replace('qkv' , 'value' )] = value_layer
        else:
            model_state_dict[key] = value
return model_state_dict
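# Note: the qkv split above mirrors how HF attention modules keep separate
# query/key/value projections, whereas the CLAP audio tower stores them as one
# fused qkv weight; slicing the first dimension into thirds recovers the parts.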
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    '''simple docstring'''
    clap_model, model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    # NOTE: exactly where `enable_fusion` lives on the config is reconstructed
    # from context, not verified against the original script.
    transformers_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics( split , metrics , output_dir ):
    """simple docstring"""
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
def main():
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )
# set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
        freeze_embeds(model )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['''train_n_objs'''] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(metric_key_prefix='''val''' )
        metrics['''val_n_objs'''] = data_args.n_val
        metrics['''val_loss'''] = round(metrics['''val_loss'''] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='''test''' )
        metrics = test_output.metrics
        metrics['''test_n_objs'''] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['''test_loss'''] = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _mp_fn( index ):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
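# `_mp_fn` is the per-process entry point expected by `xla_spawn.py` when this
# script is launched on TPUs; the spawn index is unused and each process just
# runs `main()`.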
if __name__ == "__main__":
main()
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main():
    """simple docstring"""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print('''Processing...''')
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit('''.''', 1)[0]
        file_root = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cv2.imwrite(F"""/{file_root}.jpg""", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(F"""Success {index+1}/{len(new_images)} with {file_name}""")
        annos_list = []
        for anno in new_annos[index]:
            obj = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(obj)
        with open(F"""/{file_root}.txt""", '''w''') as outfile:
            outfile.write('''\n'''.join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    """Reads YOLO-style label files and returns matching image paths and box lists."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '''*.txt''')):
        label_name = label_file.split(os.sep)[-1].rsplit('''.''', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, F"""{label_name}.jpg""")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''').split(''' ''')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """Flips each image (and its normalized bounding boxes) along the chosen axis."""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32):
    """Generates a random lowercase-alphanumeric string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
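# With 32 characters drawn from 36 symbols there are 36**32 possible codes, so
# collisions between generated file names are practically impossible.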
if __name__ == "__main__":
main()
print("DONE ✅")
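# Expected layout (inferred from get_dataset above, not verified): LABEL_DIR
# holds YOLO-style "<class> <x> <y> <w> <h>" .txt files, IMG_DIR the matching
# .jpg images, and flipped copies are written under OUTPUT_DIR.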
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.sp_model )
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : str ):
"""simple docstring"""
if self.remove_space:
UpperCamelCase = """ """.join(inputs.strip().split() )
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize("""NFKD""" , lowerCamelCase_ )
UpperCamelCase = """""".join([c for c in outputs if not unicodedata.combining(lowerCamelCase_ )] )
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = self.preprocess_text(lowerCamelCase_ )
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ )
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase_ )
else:
new_pieces.append(lowerCamelCase_ )
return new_pieces
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ):
"""simple docstring"""
return self.sp_model.PieceToId(lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
return self.sp_model.IdToPiece(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = """""".join(lowerCamelCase_ ).replace(lowerCamelCase_ , """ """ ).strip()
return out_string
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = None , lowerCamelCase_ : bool = True , **lowerCamelCase_ : Tuple , ):
"""simple docstring"""
UpperCamelCase = kwargs.pop("""use_source_tokenizer""" , lowerCamelCase_ )
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) )
UpperCamelCase = []
sub_texts.append(lowerCamelCase_ )
else:
current_sub_text.append(lowerCamelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = """""".join(lowerCamelCase_ )
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_ )
return clean_text
else:
return text
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1, 1]
return ([0] * len(lowerCamelCase_ )) + [1, 1]
def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
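    # Unlike BERT, XLNet appends the classifier token at the *end* of the
    # sequence (the sequence-building method above returns token_ids + sep + cls)
    # and gives it its own segment id (the trailing [2] here).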
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_ , """wb""" ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""")
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = 0
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ )
# save in new folder
model_config.save_pretrained(lowerCamelCase_ )
config.save_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
# make sure private variable is not incorrectly saved
UpperCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaises(lowerCamelCase_ ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , lowerCamelCase_ )
AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
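    # Note: CONFIG_MAPPING and FEATURE_EXTRACTOR_MAPPING are process-wide registries,
    # so the try/finally cleanup above keeps custom registrations from leaking into
    # other tests in the same session.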
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct: dict):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
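# Minimal usage sketch (assumes an existing `datasets.Dataset` named `ds`):
# `ds.with_format("torch")` routes row/column/batch access through this formatter,
# so integer columns come back as torch.int64 tensors and floats as torch.float32.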
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
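# Example (for illustration): RobertaConfig() with the defaults above reproduces the
# roberta-base architecture, while RobertaConfig(num_hidden_layers=24, hidden_size=1024,
# num_attention_heads=16, intermediate_size=4096) matches roberta-large.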
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
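    # enable_full_determinism() (invoked at import time above) forces deterministic
    # algorithm choices (e.g. in cuDNN), so together with the manual seeding in the
    # test the fixed expected_output_slice comparison stays reproducible across runs.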
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope='''session''' )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
__UpperCAmelCase :Dict = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='''session''' )
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope='''session''' )
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='''session''' )
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='''session''' )
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope='''session''' )
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope='''session''' )
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope='''session''' )
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='''session''' )
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope='''session''' )
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope='''session''' )
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='''session''' )
def dataset_dict():
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def arrow_path(tmp_path_factory, dataset_dict):
    dataset = datasets.Dataset.from_dict(dataset_dict)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope='''session''' )
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope='''session''' )
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope='''session''' )
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope='''session''' )
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : str ):
'''simple docstring'''
__UpperCAmelCase : List[str] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(_lowercase , '''w''' ) as f:
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Dict , _lowercase : Tuple , _lowercase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(_lowercase , '''w''' ) as f:
f.write(_lowercase , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(_lowercase , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Optional[Any] , _lowercase : List[Any] , _lowercase : Dict ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(_lowercase , '''w''' ) as f:
f.write(_lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(_lowercase ) ) )
f.write(_lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(_lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Dict ):
'''simple docstring'''
__UpperCAmelCase : str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
__UpperCAmelCase : List[Any] = {'''data''': DATA}
with open(_lowercase , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
__UpperCAmelCase : Union[str, Any] = {'''data''': DATA_DICT_OF_LISTS}
with open(_lowercase , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Dict ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(_lowercase , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(_lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(_lowercase , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(_lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : str ):
'''simple docstring'''
__UpperCAmelCase : Any = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(_lowercase , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(_lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Any ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(_lowercase , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(_lowercase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Union[str, Any] , _lowercase : Optional[Any] ):
'''simple docstring'''
import gzip
__UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(_lowercase , '''rb''' ) as orig_file:
with gzip.open(_lowercase , '''wb''' ) as zipped_file:
zipped_file.writelines(_lowercase )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : List[str] , _lowercase : str ):
'''simple docstring'''
import gzip
__UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(_lowercase , '''rb''' ) as orig_file:
with gzip.open(_lowercase , '''wb''' ) as zipped_file:
zipped_file.writelines(_lowercase )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Any , _lowercase : Union[str, Any] , _lowercase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(_lowercase , '''w''' ) as f:
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : Dict , _lowercase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(_lowercase , '''w''' ) as f:
f.write(_lowercase , arcname=os.path.join('''nested''' , os.path.basename(_lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : int , _lowercase : str , _lowercase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(_lowercase , '''w''' ) as f:
f.write(_lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(_lowercase ) ) )
f.write(_lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(_lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : List[str] , _lowercase : Optional[int] , _lowercase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : str = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(_lowercase , '''w''' ) as f:
f.add(_lowercase , arcname=os.path.basename(_lowercase ) )
f.add(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Optional[Any] , _lowercase : int , _lowercase : Dict , _lowercase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(_lowercase , '''w''' ) as f:
f.add(_lowercase , arcname=os.path.join('''nested''' , os.path.basename(_lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ['''0''', '''1''', '''2''', '''3''']
__UpperCAmelCase : List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(_lowercase , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Any ):
'''simple docstring'''
__UpperCAmelCase : Tuple = ['''0''', '''1''', '''2''', '''3''']
__UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(_lowercase , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Dict ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ['''0''', '''1''', '''2''', '''3''']
__UpperCAmelCase : str = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(_lowercase , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Optional[int] , _lowercase : Dict , _lowercase : str ):
'''simple docstring'''
__UpperCAmelCase : str = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(_lowercase , '''w''' ) as f:
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(_lowercase , '''w''' ) as f:
f.write(_lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(_lowercase ) ) )
f.write(_lowercase , arcname=os.path.join('''main_dir''' , os.path.basename(_lowercase ) ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Tuple = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(_lowercase , '''w''' ) as f:
f.write(_lowercase , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(_lowercase , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
__UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope='''session''' )
def _a ( ):
'''simple docstring'''
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def _a ( ):
'''simple docstring'''
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Any , _lowercase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(_lowercase , '''w''' ) as f:
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
f.write(_lowercase , arcname=os.path.basename(_lowercase ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( _lowercase : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
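# The hidden file (.test.txt) and hidden directory (.subdir) created above exist so
# data-loading tests can check that paths starting with "." are skipped during
# data file resolution.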
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
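# With the lazy module in place, `import transformers.models.rag` stays cheap:
# e.g. `from transformers.models.rag import RagModel` only triggers the heavy
# torch-dependent import on first attribute access.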
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        pass
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
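# Example invocation (hypothetical paths, for illustration only):
#   python utils/check_tf_ops.py --saved_model_path ./saved_model/model.pb --opset 12 --strict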
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that optionally displays the bar only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every process except the local main one.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
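# Usage sketch (illustrative): because `main_process_only` is the first positional
# parameter, callers write e.g. `for batch in tqdm(True, dataloader): ...` to get a
# single progress bar on the local main process.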
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
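# Note on the qkv slicing above: the original checkpoints store attention as one
# fused projection of shape (3 * dim, dim) (bias: (3 * dim,)), so rows [:dim],
# [dim : 2 * dim] and [-dim:] recover the query, key and value parameters.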
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them in a grid
    (whose height depends on the key) in a zigzag formation and reading it
    left to right."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key, fills it in with the characters
    of the input string, and then reads it in a zigzag formation."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
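    # Illustrative round trip (not part of the original module): encrypt and
    # decrypt should be inverses for any valid key.
    ciphertext = encrypt("Hello, World!", 4)
    assert decrypt(ciphertext, 4) == "Hello, World!"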
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ):
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
_A = eval_examples
_A = post_process_function
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ):
_A = self.eval_dataset if eval_dataset is None else eval_dataset
_A = self.get_eval_dataloader(_UpperCAmelCase )
_A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
_UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions )
_A = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
else:
_A = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_UpperCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase )
return metrics
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ):
_A = self.get_test_dataloader(_UpperCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
_UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' )
_A = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
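# Sketch of how this trainer is typically wired up in a QA fine-tuning script;
# every name below (datasets, post-processing function, metric) is illustrative:
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate()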
from ..utils import DummyObject, requires_backends


# Import-time placeholders used when PyTorch is not installed. The real module
# defines one placeholder per public torch-backed class (several dozen of them)
# plus one per module-level helper function (seven in this excerpt), all
# repeating the two patterns below verbatim. The names `DummyTorchObject` and
# `dummy_torch_function` are representative stand-ins, not the actual API names.
class DummyTorchObject(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def dummy_torch_function(*args, **kwargs):
    requires_backends(dummy_torch_function, ["torch"])
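# What a dummy object buys you in practice (sketch; the class name stands in
# for any torch-backed class re-exported through this mechanism):
#
#     obj = DummyTorchObject()
#     # -> ImportError: DummyTorchObject requires the PyTorch library but it
#     #    was not found in your environment. ...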
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]

if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
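# Effect of the lazy module (sketch): importing the package stays cheap, and
# heavy submodules such as modeling_layoutlmv3 are only loaded on first
# attribute access.
#
#     from transformers import LayoutLMv3Config   # resolves through _LazyModule
#     config = LayoutLMv3Config()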
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for a given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row, each entry from the two above it."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Same triangle, but each row is built from the previous one by
    exploiting the symmetry of the row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
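    # Expected shape for a small input (illustrative):
    #   generate_pascal_triangle(3)            -> [[1], [1, 1], [1, 2, 1]]
    #   generate_pascal_triangle_optimized(3)  -> [[1], [1, 1], [1, 2, 1]]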
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import datasets
_CITATION = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
"""simple docstring"""
from functools import lru_cache
@lru_cache
def a_ ( _lowercase ):
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
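    # Quick check (illustrative): repeated calls are served from the cache.
    assert factorial(10) == 3628800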
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in the documentation's custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase="" , lowerCamelCase="train" ):
assert os.path.isdir(lowerCamelCase )
__a = []
__a = os.listdir(lowerCamelCase )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
__a = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isfile(lowerCamelCase ):
continue
self.documents.append(lowerCamelCase )
def __len__( self ):
return len(self.documents )
def __getitem__( self , lowerCamelCase ):
__a = self.documents[idx]
__a = document_path.split("/" )[-1]
with open(lowerCamelCase , encoding="utf-8" ) as source:
__a = source.read()
__a , __a = process_story(lowerCamelCase )
return document_name, story_lines, summary_lines
def _lowerCamelCase( a ):
__a = list(filter(lambda a : len(a ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) )
# for some unknown reason some lines miss a period, add it
__a = [_add_missing_period(a ) for line in nonempty_lines]
# gather article lines
__a = []
__a = deque(a )
while True:
try:
__a = lines.popleft()
if element.startswith("@highlight" ):
break
story_lines.append(a )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
__a = list(filter(lambda a : not t.startswith("@highlight" ) , a ) )
return story_lines, summary_lines
def _lowerCamelCase( a ):
__a = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
if line.startswith("@highlight" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def _lowerCamelCase( a , a , a ):
if len(a ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(a )) )
return sequence
def _lowerCamelCase( a , a ):
__a = torch.ones_like(a )
__a = sequence == pad_token_id
__a = 0
return mask
def _lowerCamelCase( a , a , a ):
__a = [tokenizer.encode(a ) for line in story_lines]
__a = [token for sentence in story_lines_token_ids for token in sentence]
__a = [tokenizer.encode(a ) for line in summary_lines]
__a = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def _lowerCamelCase( a , a ):
__a = []
for sequence in batch:
__a = -1
__a = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(a )
return torch.tensor(a )
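# Sketch of typical use (path and tokenizer are illustrative):
#
#     dataset = CNNDMDataset("/path/to/cnn_stories", prefix="train")
#     name, story_lines, summary_lines = dataset[0]
#     story_ids, summary_ids = encode_for_summarization(story_lines, summary_lines, tokenizer)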
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:str = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """sew-d"""
def __init__( self , lowerCamelCase=32 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase=2 , lowerCamelCase=512 , lowerCamelCase=256 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=("p2c", "c2p") , lowerCamelCase="layer_norm" , lowerCamelCase="gelu_python" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=1E-7 , lowerCamelCase=1E-5 , lowerCamelCase="group" , lowerCamelCase="gelu" , lowerCamelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase=False , lowerCamelCase=128 , lowerCamelCase=16 , lowerCamelCase=True , lowerCamelCase=0.05 , lowerCamelCase=10 , lowerCamelCase=2 , lowerCamelCase=0.0 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase="mean" , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=256 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase )
__a = hidden_size
__a = feat_extract_norm
__a = feat_extract_activation
__a = list(lowerCamelCase )
__a = list(lowerCamelCase )
__a = list(lowerCamelCase )
__a = conv_bias
__a = num_conv_pos_embeddings
__a = num_conv_pos_embedding_groups
__a = len(self.conv_dim )
__a = num_hidden_layers
__a = intermediate_size
__a = squeeze_factor
__a = max_position_embeddings
__a = position_buckets
__a = share_att_key
__a = relative_attention
__a = norm_rel_ebd
__a = list(lowerCamelCase )
__a = hidden_act
__a = num_attention_heads
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = feat_proj_dropout
__a = final_dropout
__a = layer_norm_eps
__a = feature_layer_norm_eps
__a = initializer_range
__a = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a = apply_spec_augment
__a = mask_time_prob
__a = mask_time_length
__a = mask_time_min_masks
__a = mask_feature_prob
__a = mask_feature_length
__a = mask_feature_min_masks
# ctc loss
__a = ctc_loss_reduction
__a = ctc_zero_infinity
# sequence classification
__a = use_weighted_layer_sum
__a = classifier_proj_size
@property
def a__ ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
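# Illustrative usage: the property above gives the total downsampling performed
# by the convolutional feature extractor.
#
#     config = SEWDConfig()
#     config.inputs_to_logits_ratio  # product of conv strides: 5*2*2*2*2*2*2 = 320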
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any = CycleDiffusionPipeline
snake_case__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
snake_case__ : List[Any] = PipelineTesterMixin.required_optional_params - {"latents"}
snake_case__ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
snake_case__ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self : Tuple ) -> str:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1_0_0_0 , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__SCREAMING_SNAKE_CASE = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]=0 ) -> Any:
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = image / 2 + 0.5
if str(UpperCAmelCase__ ).startswith("mps" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = {
"prompt": "An astronaut riding an elephant",
"source_prompt": "An astronaut riding a horse",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"eta": 0.1,
"strength": 0.8,
"guidance_scale": 3,
"source_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
__SCREAMING_SNAKE_CASE = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
for name, module in components.items():
if hasattr(UpperCAmelCase__ , "half" ):
__SCREAMING_SNAKE_CASE = module.half()
__SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
__SCREAMING_SNAKE_CASE = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCAmelCase_ ( self : int ) -> Tuple:
return super().test_save_load_local()
@unittest.skip("non-deterministic pipeline" )
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
return super().test_inference_batch_single_identical()
@skip_mps
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
return super().test_save_load_optional_components()
@skip_mps
def UpperCAmelCase_ ( self : int ) -> str:
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
"""Word Error Rate (WER) metric, backed by jiwer."""

import datasets
from jiwer import compute_measures


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=["https://en.wikipedia.org/wiki/Word_error_rate"],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = compute_measures(reference, prediction)
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
"""Feature extractor class for EnCodec."""

from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        # chunk_length_s is in seconds; convert to a number of samples
        return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        # the stride shrinks as the overlap between consecutive chunks grows
        return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
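

# ---------------------------------------------------------------------------
# Sanity-check sketch for the chunking arithmetic above. The values below are
# illustrative assumptions loosely mirroring EnCodec's 24 kHz setup, not the
# class defaults: the longest input is rounded up so it splits into a whole
# number of overlapping chunks.
if __name__ == "__main__":
    sampling_rate = 24000
    chunk_length_s, overlap = 1.0, 0.01

    chunk_length = int(chunk_length_s * sampling_rate)  # samples per chunk: 24000
    chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # between chunk starts: 23760

    max_length = 50000
    nb_step = int(np.ceil(max_length / chunk_stride))  # 3
    padded_length = (nb_step - 1) * chunk_stride + chunk_length  # 71520 >= 50000
    assert padded_length >= max_length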
"""TimeSformer model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
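

# ---------------------------------------------------------------------------
# Quick sketch of how the defaults translate into token counts: the patch
# embedding is ViT-style, so tokens = num_frames * patches per frame + 1 CLS.
if __name__ == "__main__":
    config = TimesformerConfig()  # image_size=224, patch_size=16, num_frames=8
    num_patches_per_frame = (config.image_size // config.patch_size) ** 2  # 14 * 14 = 196
    num_tokens = config.num_frames * num_patches_per_frame + 1  # 8 * 196 + 1 = 1569
    print(num_tokens)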
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
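

# ---------------------------------------------------------------------------
# Shape bookkeeping behind the attention test above, checked by hand with the
# tester's default hyperparameters (plain arithmetic, no model needed):
if __name__ == "__main__":
    image_size, patch_size, num_frames = 10, 2, 2
    num_patches_per_frame = (image_size // patch_size) ** 2  # 25
    seq_length = num_frames * num_patches_per_frame + 1  # 51
    # each attention map is (num_patches_per_frame + 1) x (num_patches_per_frame + 1)
    assert seq_length // num_frames + 1 == num_patches_per_frame + 1  # 26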
"""simple docstring"""
from datetime import datetime
import requests
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->bytes:
'''simple docstring'''
a : Dict = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
a : Dict = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(_lowercase ).content
if __name__ == "__main__":
a : str = input('''Enter Video/IGTV url: ''').strip()
a : Any = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
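

# ---------------------------------------------------------------------------
# A hedged variant for large files: stream the response to disk instead of
# buffering the whole video in memory. `download_video_streamed` is a
# hypothetical helper, not part of the original script above.
def download_video_streamed(video_url: str, file_name: str, chunk_size: int = 1 << 16) -> None:
    with requests.get(video_url, stream=True, timeout=30) as response:
        response.raise_for_status()
        with open(file_name, "wb") as fp:
            for chunk in response.iter_content(chunk_size=chunk_size):
                fp.write(chunk)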
"""Tests for the CamemBERT tokenizer (slow and fast implementations)."""

import unittest

from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]],  # noqa: E501
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],  # noqa: E501
        }
        # fmt: on

        # CamemBERT is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
"""PyTorch - Flax general utilities."""

import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


logger = logging.get_logger(__name__)


#####################
# PyTorch => Flax #
#####################


def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to the corresponding Flax names, reshaping the tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
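

# ---------------------------------------------------------------------------
# Illustration of the two reshapes above (toy shapes, assumed for the example):
# a PyTorch Conv2d weight (out_ch, in_ch, kH, kW) becomes a Flax Conv kernel
# (kH, kW, in_ch, out_ch), and a PyTorch Linear weight (out, in) becomes a
# Flax Dense kernel (in, out).
if __name__ == "__main__":
    pt_conv = np.zeros((8, 3, 5, 5))
    flax_conv = pt_conv.transpose(2, 3, 1, 0)
    pt_linear = np.zeros((16, 32))
    flax_dense = pt_linear.T
    assert flax_conv.shape == (5, 5, 3, 8) and flax_dense.shape == (32, 16)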
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
#####################
# Flax => PyTorch #
#####################


def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
lowerCAmelCase : List[str] = ["""small""", """medium""", """large"""]
lowerCAmelCase : int = """lm_head.decoder.weight"""
lowerCAmelCase : List[str] = """lm_head.weight"""
def a__ ( snake_case__ , snake_case__ ) -> Optional[int]:
lowerCamelCase = torch.load(_lowerCAmelCase )
lowerCamelCase = d.pop(_lowerCAmelCase )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
if __name__ == "__main__":
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
lowerCAmelCase : List[Any] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
lowerCAmelCase : int = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
lowerCAmelCase : List[Any] = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
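
    # Sketch of the key rename performed above, on a toy state dict
    # (illustrative values only): DialoGPT stores the tied LM head under
    # `lm_head.decoder.weight`, while transformers expects `lm_head.weight`.
    toy_state_dict = {"lm_head.decoder.weight": 0, "transformer.wte.weight": 0}
    toy_state_dict["lm_head.weight"] = toy_state_dict.pop("lm_head.decoder.weight")
    assert "lm_head.weight" in toy_state_dict and "lm_head.decoder.weight" not in toy_state_dict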
def harmonic_series(n_term: str) -> list:
    """Return the first ``n_term`` terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
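

# ---------------------------------------------------------------------------
# Worked example for the function above (runs on import, purely illustrative):
assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]
assert harmonic_series("") == []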
def heaps(arr: list) -> list:
    """Pure python implementation of Heap's algorithm: returns all permutations of a list."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
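

# ---------------------------------------------------------------------------
# Worked example: Heap's algorithm yields all 3! = 6 permutations of [1, 2, 3].
example_perms = heaps([1, 2, 3])
assert len(example_perms) == 6
assert set(example_perms) == {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}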
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
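

# ---------------------------------------------------------------------------
# Note: the routine above swaps two random indices per step, which is a
# simplification. A sketch of the classical, unbiased Fisher-Yates shuffle
# for comparison (added here for illustration, not part of the original):
def fisher_yates_classic(data: list) -> list:
    # walk from the end; swap each position with a uniformly chosen index <= it
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data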
"""Rat-in-maze solver using backtracking."""

from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """
    Solves the rat-in-maze problem: prints the path from (0, 0) to
    (size - 1, size - 1) as a matrix of 0s and 1s if one exists.
    """
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive helper: tries to extend the path at cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark the cell as visited
            solutions[i][j] = 1

            # explore the four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            # backtrack
            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
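

# ---------------------------------------------------------------------------
# Minimal example (convention assumed from the block check above: 0 = open
# cell, 1 = wall; the rat goes from (0, 0) to (size - 1, size - 1)):
if __name__ == "__main__":
    example_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    solve_maze(example_maze)  # prints the 0/1 path matrix if a path exists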
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity of two collections: |intersection| / |union|.

    With alternative_union=True the denominator is len(set_a) + len(set_b) instead.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        union = set_a + [element for element in set_b if element not in set_a]
        return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
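# Sanity check (added; not in the original source): set_a and set_b above share
# {"c", "d", "e"}, so the intersection has 3 elements and the union has 8, and
# the call should print 3 / 8 = 0.375.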
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase : Any = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowerCamelCase_ ):
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Names of the learning-rate schedules supported by `get_scheduler`."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant learning-rate multiplier parsed from a rule string."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
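# Illustration of the rule-string format parsed above (added for clarity):
# step_rules="1:10,0.1:20,0.01" builds rules_dict == {10: 1.0, 20: 0.1} with a
# final multiplier of 0.01, i.e. the lr is multiplied by 1.0 until step 10,
# by 0.1 until step 20, and by 0.01 from then on.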
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay down to zero."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Cosine decay with `num_cycles` hard restarts, after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Polynomial decay from the optimizer's initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
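# Worked example for the polynomial branch (added as a sanity check): with
# lr_init=1e-3, lr_end=1e-7 and power=1.0, halfway through the decay window
# pct_remaining == 0.5, so decay == (1e-3 - 1e-7) * 0.5 + 1e-7 == 0.00050005
# and the returned multiplier is decay / lr_init == 0.50005.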
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point that dispatches to the schedule builders above."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            num_cycles=num_cycles, last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            power=power, last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
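# Minimal usage sketch (added; the hyperparameters below are illustrative and
# not taken from the original file):
#
#   import torch
#
#   params = [torch.nn.Parameter(torch.zeros(2, 2))]
#   optimizer = torch.optim.AdamW(params, lr=1e-3)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=100, num_training_steps=1000
#   )
#   for _ in range(1000):
#       optimizer.step()
#       lr_scheduler.step()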
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Find the prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
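# Note (added): with the default ceiling of 1_000_000 this is Project Euler
# problem 50; its published answer, 997651, is a handy sanity check for the
# search above.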
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Dict = {'''vocab_file''': '''spiece.model'''}
__UpperCamelCase : str = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
__UpperCamelCase : List[str] = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
__UpperCamelCase : Optional[Any] = '''▁'''
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _snake_case , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case="[CLS]" , _snake_case="[SEP]" , _snake_case="<unk>" , _snake_case="[SEP]" , _snake_case="<pad>" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = (
AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case , normalized=_snake_case )
if isinstance(_snake_case , _snake_case )
else mask_token
)
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.sp_model )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if self.remove_space:
lowerCAmelCase = ' '.join(inputs.strip().split() )
else:
lowerCAmelCase = inputs
lowerCAmelCase = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
lowerCAmelCase = unicodedata.normalize('NFKD' , _snake_case )
lowerCAmelCase = ''.join([c for c in outputs if not unicodedata.combining(_snake_case )] )
if self.do_lower_case:
lowerCAmelCase = outputs.lower()
return outputs
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.preprocess_text(_snake_case )
lowerCAmelCase = self.sp_model.encode(_snake_case , out_type=_snake_case )
lowerCAmelCase = []
for piece in pieces:
if len(_snake_case ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_snake_case , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase = cur_pieces[1:]
else:
lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_snake_case )
else:
new_pieces.append(_snake_case )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.PieceToId(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.IdToPiece(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = ''
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_snake_case ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(_snake_case )
lowerCAmelCase = False
out_string += self.sp_model.decode(_snake_case )
return out_string.strip()
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
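    # For reference (added): a single sequence is encoded as `[CLS] A [SEP]`
    # and a pair as `[CLS] A [SEP] B [SEP]`, the standard ALBERT input format.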
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : Dict , A : Optional[int]=7 , A : Tuple=3 , A : Optional[Any]=10 , A : int=18 , A : Dict=30 , A : List[str]=400 , A : int=True , A : Optional[Any]=None , A : Optional[Any]=True , A : List[Any]=[0.5, 0.5, 0.5] , A : List[str]=[0.5, 0.5, 0.5] , A : Optional[int]=None , ):
_UpperCAmelCase : Dict = size if size is not None else {"shortest_edge": 18}
_UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
_UpperCAmelCase : Tuple = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : Optional[int] = num_channels
_UpperCAmelCase : Optional[Any] = num_frames
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : Dict = min_resolution
_UpperCAmelCase : Any = max_resolution
_UpperCAmelCase : Optional[int] = do_resize
_UpperCAmelCase : str = size
_UpperCAmelCase : List[Any] = do_normalize
_UpperCAmelCase : Any = image_mean
_UpperCAmelCase : Tuple = image_std
_UpperCAmelCase : Any = crop_size
def _A ( self : List[Any] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCamelCase_ (snake_case__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Dict = VivitImageProcessor if is_vision_available() else None
def _A ( self : int ):
_UpperCAmelCase : Tuple = VivitImageProcessingTester(self )
@property
def _A ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , "image_mean" ) )
self.assertTrue(hasattr(A , "image_std" ) )
self.assertTrue(hasattr(A , "do_normalize" ) )
self.assertTrue(hasattr(A , "do_resize" ) )
self.assertTrue(hasattr(A , "do_center_crop" ) )
self.assertTrue(hasattr(A , "size" ) )
def _A ( self : List[Any] ):
_UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
_UpperCAmelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _A ( self : Tuple ):
# Initialize image_processing
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_UpperCAmelCase : Any = prepare_video_inputs(self.image_processor_tester , equal_resolution=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_UpperCAmelCase : str = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : List[Any] ):
# Initialize image_processing
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_UpperCAmelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : Optional[int] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : List[Any] ):
# Initialize image_processing
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for video in video_inputs:
self.assertIsInstance(A , A )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processing(A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
"""simple docstring"""
import argparse
import os
import re
_SCREAMING_SNAKE_CASE : List[str] = """src/diffusers"""
# Pattern that looks at the indentation in a line.
_SCREAMING_SNAKE_CASE : Optional[int] = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":` and puts `key` in group 0.
_SCREAMING_SNAKE_CASE : Any = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_SCREAMING_SNAKE_CASE : List[str] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_SCREAMING_SNAKE_CASE : str = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(r"""\[([^\]]+)\]""")
def _lowerCAmelCase ( UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : str =_re_indent.search(UpperCAmelCase )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : Union[str, Any]="" , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Tuple=None ):
'''simple docstring'''
UpperCamelCase__ : int =0
UpperCamelCase__ : Union[str, Any] =code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(UpperCAmelCase ):
index += 1
UpperCamelCase__ : Optional[int] =['''\n'''.join(lines[:index] )]
else:
UpperCamelCase__ : List[Any] =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCamelCase__ : Dict =[lines[index]]
index += 1
while index < len(UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(UpperCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(UpperCAmelCase ) )
if index < len(UpperCAmelCase ) - 1:
UpperCamelCase__ : Optional[Any] =[lines[index + 1]]
index += 1
else:
UpperCamelCase__ : List[str] =[]
else:
blocks.append('''\n'''.join(UpperCAmelCase ) )
UpperCamelCase__ : List[Any] =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(UpperCAmelCase ) > 0:
blocks.append('''\n'''.join(UpperCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(UpperCAmelCase ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def _lowerCAmelCase ( UpperCAmelCase : str ):
'''simple docstring'''
def _inner(UpperCAmelCase : Dict ):
return key(UpperCAmelCase ).lower().replace('''_''' , '''''' )
return _inner
def _lowerCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : Dict=None ):
'''simple docstring'''
    def noop(x):
        return x
if key is None:
UpperCamelCase__ : int =noop
# Constants are all uppercase, they go first.
UpperCamelCase__ : List[str] =[obj for obj in objects if key(UpperCAmelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
UpperCamelCase__ : Dict =[obj for obj in objects if key(UpperCAmelCase )[0].isupper() and not key(UpperCAmelCase ).isupper()]
# Functions begin with a lowercase, they go last.
UpperCamelCase__ : int =[obj for obj in objects if not key(UpperCAmelCase )[0].isupper()]
UpperCamelCase__ : Optional[int] =ignore_underscore(UpperCAmelCase )
return sorted(UpperCAmelCase , key=UpperCAmelCase ) + sorted(UpperCAmelCase , key=UpperCAmelCase ) + sorted(UpperCAmelCase , key=UpperCAmelCase )
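# Illustration (added): sort_objects(["foo_bar", "BAZ", "FooBar"]) returns
# ["BAZ", "FooBar", "foo_bar"] -- constants first, classes second, functions
# last, each group sorted case-insensitively with underscores ignored.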
def _lowerCAmelCase ( UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
def _replace(UpperCAmelCase : Union[str, Any] ):
UpperCamelCase__ : List[str] =match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
UpperCamelCase__ : Optional[int] =[part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase__ : Tuple =keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(UpperCAmelCase )] ) + "]"
UpperCamelCase__ : List[Any] =import_statement.split('''\n''' )
if len(UpperCAmelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCamelCase__ : List[str] =2 if lines[1].strip() == '''[''' else 1
UpperCamelCase__ : List[str] =[(i, _re_strip_line.search(UpperCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCamelCase__ : List[str] =sort_objects(UpperCAmelCase , key=lambda UpperCAmelCase : x[1] )
UpperCamelCase__ : Tuple =[lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(UpperCAmelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCamelCase__ : Dict =_re_bracket_content.sub(_replace , lines[1] )
else:
UpperCamelCase__ : Optional[int] =[part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase__ : Tuple =keys[:-1]
UpperCamelCase__ : Optional[Any] =get_indent(lines[1] ) + ''', '''.join([F'''"{k}"''' for k in sort_objects(UpperCAmelCase )] )
return "\n".join(UpperCAmelCase )
else:
# Finally we have to deal with imports fitting on one line
UpperCamelCase__ : List[str] =_re_bracket_content.sub(_replace , UpperCAmelCase )
return import_statement
def _lowerCAmelCase ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=True ):
'''simple docstring'''
with open(UpperCAmelCase , '''r''' ) as f:
UpperCamelCase__ : int =f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCamelCase__ : Optional[int] =split_code_in_indented_blocks(
UpperCAmelCase , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(UpperCAmelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCamelCase__ : Dict =main_blocks[block_idx]
UpperCamelCase__ : List[str] =block.split('''\n''' )
# Get to the start of the imports.
UpperCamelCase__ : str =0
while line_idx < len(UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCamelCase__ : Optional[int] =len(UpperCAmelCase )
else:
line_idx += 1
if line_idx >= len(UpperCAmelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCamelCase__ : Optional[Any] ='''\n'''.join(block_lines[line_idx:-1] )
UpperCamelCase__ : Tuple =get_indent(block_lines[1] )
    # Split the internal block into blocks of indent level 1.
UpperCamelCase__ : str =split_code_in_indented_blocks(UpperCAmelCase , indent_level=UpperCAmelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCamelCase__ : str =_re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
UpperCamelCase__ : Tuple =[(pattern.search(UpperCAmelCase ).groups()[0] if pattern.search(UpperCAmelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCamelCase__ : List[Any] =[(i, key) for i, key in enumerate(UpperCAmelCase ) if key is not None]
UpperCamelCase__ : Optional[Any] =[x[0] for x in sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
UpperCamelCase__ : Union[str, Any] =0
UpperCamelCase__ : str =[]
for i in range(len(UpperCAmelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
UpperCamelCase__ : Optional[Any] =sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(UpperCAmelCase )
count += 1
# And we put our main block back together with its first and last line.
UpperCamelCase__ : Optional[Any] ='''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(UpperCAmelCase ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write('''\n'''.join(UpperCAmelCase ) )
def _lowerCAmelCase ( UpperCAmelCase : Dict=True ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] =[]
for root, _, files in os.walk(UpperCAmelCase ):
if "__init__.py" in files:
UpperCamelCase__ : List[Any] =sort_imports(os.path.join(UpperCAmelCase , '''__init__.py''' ) , check_only=UpperCAmelCase )
if result:
UpperCamelCase__ : int =[os.path.join(UpperCAmelCase , '''__init__.py''' )]
if len(UpperCAmelCase ) > 0:
raise ValueError(F'''Would overwrite {len(UpperCAmelCase )} files, run `make style`.''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
_SCREAMING_SNAKE_CASE : Optional[int] = """path-to-your-trained-model"""
_SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
_SCREAMING_SNAKE_CASE : Dict = """A photo of sks dog in a bucket"""
_SCREAMING_SNAKE_CASE : Any = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__a : List[Any] = logging.get_logger(__name__)
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''' , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__a : List[str] = Lock()
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
__lowercase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
__lowercase = min(lowercase , lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
__lowercase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
__lowercase = max(lowercase , lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(lowercase )
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = []
__lowercase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
__lowercase = Pipe()
__lowercase = Pipe()
process_array_.append(
Process(
target=lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
__lowercase = temp_rs
__lowercase = temp_rr
for i in range(1 , len(lowercase ) - 1 ):
__lowercase = Pipe()
__lowercase = Pipe()
process_array_.append(
Process(
target=lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
__lowercase = temp_rs
__lowercase = temp_rr
process_array_.append(
Process(
target=lowercase , args=(
len(lowercase ) - 1,
arr[len(lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(lowercase ) ):
__lowercase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*lowercase )
__lowercase = odd_even_transposition(lowercase )
print('''Sorted List\n''' )
print(*lowercase )
if __name__ == "__main__":
main()
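# Note (added): odd-even transposition sort needs n compare-and-swap rounds to
# guarantee a sorted result; the hard-coded range(0, 10) in the worker matches
# the 10-element list built in main(), so a general version would loop
# len(arr) times instead.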
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__SCREAMING_SNAKE_CASE ={
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__SCREAMING_SNAKE_CASE ={
"yjernite/retribert-base-uncased": 512,
}
__SCREAMING_SNAKE_CASE ={
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class UpperCamelCase ( lowercase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = RetriBertTokenizer
lowercase = ['input_ids', 'attention_mask']
def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase="[UNK]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[PAD]" ,__UpperCamelCase="[CLS]" ,__UpperCamelCase="[MASK]" ,__UpperCamelCase=True ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> Dict:
'''simple docstring'''
super().__init__(
__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,do_lower_case=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,tokenize_chinese_chars=__UpperCamelCase ,strip_accents=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,__UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' ,__UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,__UpperCamelCase ) != tokenize_chinese_chars
):
lowercase_ : List[str] = getattr(__UpperCamelCase ,normalizer_state.pop('type' ) )
lowercase_ : Tuple = do_lower_case
lowercase_ : List[Any] = strip_accents
lowercase_ : Dict = tokenize_chinese_chars
lowercase_ : Any = normalizer_class(**__UpperCamelCase )
lowercase_ : str = do_lower_case
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> int:
'''simple docstring'''
lowercase_ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : Dict = [self.sep_token_id]
lowercase_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ : str = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase )
return tuple(__UpperCamelCase )
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
lowercase_ : Dict = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = 'sshleifer/tiny-gpt2'
lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = 'sshleifer/tiny-gpt2'
lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
lowercase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] )
lowercase_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : str = 'patrickvonplaten/t5-tiny-random'
lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] )
lowercase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,)
lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'current' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,)
lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase )
lowercase_ : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
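# Illustrative end-to-end sketch (an addition, not part of the test suite):
# how a single inference benchmark is typically driven. The model id and the
# tiny sizes are placeholders chosen to keep the run cheap.
if __name__ == "__main__":
    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

    benchmark_args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    benchmark_results = TensorFlowBenchmark(benchmark_args).run()
    print(benchmark_results.time_inference_result)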
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __lowercase ( _a ):
snake_case_ : Optional[int] = checkpoints.load_tax_checkpoint(_a )
snake_case_ : Any = flatten_dict(_a )
return flax_params
def __lowercase ( _a ):
snake_case_ : str = {}
snake_case_ : Optional[int] = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
snake_case_ : List[Any] = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
snake_case_ : str = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
snake_case_ : List[str] = new_key.replace(_a , _a )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
snake_case_ : Any = new_key.replace(_a , _a )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
snake_case_ : Union[str, Any] = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _a )
snake_case_ : Optional[int] = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
snake_case_ : List[Any] = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _a )
snake_case_ : List[Any] = flax_dict[key]
snake_case_ : Optional[int] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
snake_case_ : Any = torch.from_numpy(converted_dict[key].T )
else:
snake_case_ : Any = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __lowercase ( _a , _a , _a=False , _a=False ):
snake_case_ : List[Any] = get_flax_param(_a )
if not use_large:
snake_case_ : Dict = PixaStructVisionConfig()
snake_case_ : Dict = PixaStructTextConfig()
else:
snake_case_ : Optional[int] = PixaStructVisionConfig(
hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 )
snake_case_ : Optional[int] = PixaStructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , num_layers=18 )
snake_case_ : int = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_a )
snake_case_ : str = PixaStructForConditionalGeneration(_a )
snake_case_ : List[str] = rename_and_convert_flax_params(_a )
model.load_state_dict(_a )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
snake_case_ : List[Any] = PixaStructImageProcessor()
snake_case_ : Union[str, Any] = PixaStructProcessor(image_processor=_a , tokenizer=_a )
if use_large:
snake_case_ : Tuple = 4_096
snake_case_ : Union[str, Any] = True
# mkdir if needed
os.makedirs(_a , exist_ok=_a )
model.save_pretrained(_a )
processor.save_pretrained(_a )
print('''Model saved in {}'''.format(_a ) )
if __name__ == "__main__":
lowercase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
    parser.add_argument('''--is_vqa''', action='''store_true''', help='''Whether the checkpoint is a VQA (visual question answering) model.''')
lowercase__ : int = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
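# Example invocation (a sketch; the script filename and both paths are
# placeholders you must supply):
#
#   python convert_pix2struct_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted \
#       --use_large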
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
def __lowercase ( _a , _a=False ):
snake_case_ : List[str] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def __lowercase ( _a , _a , _a=False ):
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ : List[str] = ''''''
else:
snake_case_ : Dict = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ : List[str] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case_ : Optional[int] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Any = in_proj_weight[
: config.hidden_size, :
]
snake_case_ : Dict = in_proj_bias[: config.hidden_size]
snake_case_ : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ : Dict = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ : str = in_proj_bias[-config.hidden_size :]
def __lowercase ( _a ):
snake_case_ : Dict = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(_a , _a )
def __lowercase ( _a , _a , _a ):
snake_case_ : Union[str, Any] = dct.pop(_a )
snake_case_ : Union[str, Any] = val
def __lowercase ( ):
snake_case_ : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ : Tuple = Image.open(requests.get(_a , stream=_a ).raw )
return im
@torch.no_grad()
def __lowercase ( _a , _a , _a=False ):
snake_case_ : str = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=_a , )
snake_case_ : Tuple = ViTHybridConfig(backbone_config=_a , image_size=384 , num_labels=1_000 )
snake_case_ : int = False
# load original model from timm
snake_case_ : str = timm.create_model(_a , pretrained=_a )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_a )
snake_case_ : int = create_rename_keys(_a , _a )
for src, dest in rename_keys:
rename_key(_a , _a , _a )
read_in_q_k_v(_a , _a , _a )
snake_case_ : Optional[Any] = '''huggingface/label-files'''
snake_case_ : Any = '''imagenet-1k-id2label.json'''
snake_case_ : Dict = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ : Dict = {int(_a ): v for k, v in idalabel.items()}
snake_case_ : Optional[int] = idalabel
snake_case_ : Optional[Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case_ : Optional[Any] = ViTHybridModel(_a ).eval()
else:
snake_case_ : Any = ViTHybridForImageClassification(_a ).eval()
model.load_state_dict(_a )
# create image processor
snake_case_ : Optional[Any] = create_transform(**resolve_data_config({} , model=_a ) )
snake_case_ : List[Any] = transform.transforms
snake_case_ : Optional[Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
snake_case_ : List[Any] = ViTHybridImageProcessor(
do_resize=_a , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_a , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_a , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case_ : Optional[int] = prepare_img()
snake_case_ : Optional[int] = transform(_a ).unsqueeze(0 )
snake_case_ : int = processor(_a , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_a , _a )
# verify logits
with torch.no_grad():
snake_case_ : List[str] = model(_a )
snake_case_ : Any = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
snake_case_ : Optional[Any] = timm_model.forward_features(_a )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_a , outputs.pooler_output , atol=1E-3 )
else:
snake_case_ : int = timm_model(_a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_a , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_a ).mkdir(exist_ok=_a )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_a )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_a )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
lowercase__ : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
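# Example invocation (a sketch; the script filename and dump path are placeholders):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-converted \
#       --push_to_hub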
'''simple docstring'''
from __future__ import annotations
import queue
class _SCREAMING_SNAKE_CASE :
def __init__( self : int , a__ : Dict ):
__magic_name__ = data
__magic_name__ = None
__magic_name__ = None
def UpperCamelCase ( ) -> str:
'''simple docstring'''
    print('''\n********Press N at any point to stop entering nodes********\n''' )
__magic_name__ = input('''Enter the value of the root node: ''' ).strip().lower()
__magic_name__ = queue.Queue()
__magic_name__ = TreeNode(int(lowercase__ ) )
q.put(lowercase__ )
while not q.empty():
__magic_name__ = q.get()
__magic_name__ = F'''Enter the left node of {node_found.data}: '''
__magic_name__ = input(lowercase__ ).strip().lower() or 'n'
if check == "n":
return tree_node
__magic_name__ = TreeNode(int(lowercase__ ) )
__magic_name__ = left_node
q.put(lowercase__ )
__magic_name__ = F'''Enter the right node of {node_found.data}: '''
__magic_name__ = input(lowercase__ ).strip().lower() or 'n'
if check == "n":
return tree_node
__magic_name__ = TreeNode(int(lowercase__ ) )
__magic_name__ = right_node
q.put(lowercase__ )
raise
def UpperCamelCase ( a ) -> str:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ) or not node:
return
print(node.data , end=''',''' )
pre_order(node.left )
pre_order(node.right )
def UpperCamelCase ( a ) -> Dict:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ) or not node:
return
in_order(node.left )
print(node.data , end=''',''' )
in_order(node.right )
def UpperCamelCase ( a ) -> List[Any]:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=''',''' )
def UpperCamelCase ( a ) -> Tuple:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ) or not node:
return
__magic_name__ = queue.Queue()
q.put(lowercase__ )
while not q.empty():
__magic_name__ = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def UpperCamelCase ( a ) -> Optional[int]:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ) or not node:
return
__magic_name__ = queue.Queue()
q.put(lowercase__ )
while not q.empty():
__magic_name__ = []
while not q.empty():
__magic_name__ = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(lowercase__ )
def UpperCamelCase ( a ) -> Union[str, Any]:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ) or not node:
return
__magic_name__ = []
__magic_name__ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=''',''' )
stack.append(lowercase__ )
__magic_name__ = n.left
# end of while means current node doesn't have left child
__magic_name__ = stack.pop()
# start to traverse its right child
__magic_name__ = n.right
def UpperCamelCase ( a ) -> Dict:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ) or not node:
return
__magic_name__ = []
__magic_name__ = node
while n or stack:
while n:
stack.append(lowercase__ )
__magic_name__ = n.left
__magic_name__ = stack.pop()
print(n.data , end=''',''' )
__magic_name__ = n.right
def UpperCamelCase ( a ) -> List[Any]:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ) or not node:
return
    stacka , stackb = [], []
    n = node
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb:  # pop up from stackb will be the post order
        print(stackb.pop().data , end=''',''' )
def UpperCamelCase ( a = "" , a=50 , a="*" ) -> Any:
'''simple docstring'''
if not s:
return "\n" + width * char
__magic_name__ = divmod(width - len(lowercase__ ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
_lowerCAmelCase = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
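# Self-contained sketch (an addition; names are local to this sketch): the same
# pre-order traversal wired up without interactive input, for quick experiments.
def _demo_preorder_sketch() -> None:
    class Node:
        def __init__(self, data: int) -> None:
            self.data, self.left, self.right = data, None, None

    def preorder(n) -> None:
        if n:
            print(n.data, end=",")
            preorder(n.left)
            preorder(n.right)

    root = Node(1)
    root.left, root.right = Node(2), Node(3)
    root.left.left = Node(4)
    preorder(root)  # prints: 1,2,4,3,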
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Tuple , a__ : Union[str, Any] , a__ : Union[str, Any]=7 , a__ : Dict=3 , a__ : Optional[Any]=18 , a__ : Optional[Any]=30 , a__ : Tuple=400 , a__ : Optional[int]=True , a__ : int=None , a__ : Union[str, Any]=True , a__ : Optional[Any]=None , a__ : str=True , a__ : List[Any]=[0.5, 0.5, 0.5] , a__ : Tuple=[0.5, 0.5, 0.5] , a__ : Union[str, Any]=False , ):
__magic_name__ = size if size is not None else {'''height''': 20, '''width''': 20}
__magic_name__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = num_channels
__magic_name__ = image_size
__magic_name__ = min_resolution
__magic_name__ = max_resolution
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = do_center_crop
__magic_name__ = crop_size
__magic_name__ = do_normalize
__magic_name__ = image_mean
__magic_name__ = image_std
__magic_name__ = do_reduce_labels
def snake_case__ ( self : Optional[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def UpperCamelCase ( ) -> str:
'''simple docstring'''
__magic_name__ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__magic_name__ = Image.open(dataset[0]['''file'''] )
__magic_name__ = Image.open(dataset[1]['''file'''] )
return image, map
def UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
__magic_name__ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__magic_name__ = Image.open(ds[0]['''file'''] )
__magic_name__ = Image.open(ds[1]['''file'''] )
__magic_name__ = Image.open(ds[2]['''file'''] )
__magic_name__ = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __a ,unittest.TestCase ):
__SCREAMING_SNAKE_CASE :Optional[Any] = BeitImageProcessor if is_vision_available() else None
def snake_case__ ( self : Dict ):
__magic_name__ = BeitImageProcessingTester(self )
@property
def snake_case__ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , '''do_resize''' ) )
self.assertTrue(hasattr(a__ , '''size''' ) )
self.assertTrue(hasattr(a__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(a__ , '''center_crop''' ) )
self.assertTrue(hasattr(a__ , '''do_normalize''' ) )
self.assertTrue(hasattr(a__ , '''image_mean''' ) )
self.assertTrue(hasattr(a__ , '''image_std''' ) )
def snake_case__ ( self : int ):
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , a__ )
__magic_name__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=a__ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , a__ )
def snake_case__ ( self : Optional[Any] ):
pass
def snake_case__ ( self : Dict ):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__magic_name__ = image_processing(a__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : Optional[int] ):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__magic_name__ = image_processing(a__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : List[str] ):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__magic_name__ = image_processing(a__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : Union[str, Any] ):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
__magic_name__ = []
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
__magic_name__ , __magic_name__ = prepare_semantic_single_inputs()
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
__magic_name__ , __magic_name__ = prepare_semantic_batch_inputs()
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def snake_case__ ( self : Any ):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__magic_name__ , __magic_name__ = prepare_semantic_single_inputs()
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
__magic_name__ = True
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
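# Illustrative usage sketch (an addition; the array shapes and sizes are
# assumptions): preparing one image plus one segmentation map outside the
# test harness.
if __name__ == "__main__":
    import numpy as np
    from transformers import BeitImageProcessor

    processor = BeitImageProcessor(
        size={"height": 20, "width": 20}, crop_size={"height": 18, "width": 18}
    )
    image = np.random.randint(0, 256, (3, 30, 30), dtype=np.uint8)
    seg_map = np.zeros((30, 30), dtype=np.int64)
    enc = processor(image, seg_map, return_tensors="pt")
    print(enc["pixel_values"].shape, enc["labels"].shape)  # (1, 3, 18, 18) (1, 18, 18)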
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """spiece.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
UpperCamelCase_ = {
"""albert-base-v1""": 5_12,
"""albert-large-v1""": 5_12,
"""albert-xlarge-v1""": 5_12,
"""albert-xxlarge-v1""": 5_12,
"""albert-base-v2""": 5_12,
"""albert-large-v2""": 5_12,
"""albert-xlarge-v2""": 5_12,
"""albert-xxlarge-v2""": 5_12,
}
UpperCamelCase_ = """▁"""
class a_ (_a ):
__lowerCAmelCase : Tuple = VOCAB_FILES_NAMES
__lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , snake_case_ , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_="[CLS]" , snake_case_="[SEP]" , snake_case_="<unk>" , snake_case_="[SEP]" , snake_case_="<pad>" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_ = None , **snake_case_ , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
_lowerCAmelCase : List[Any] = (
AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ )
if isinstance(snake_case_ , snake_case_ )
else mask_token
)
_lowerCAmelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
_lowerCAmelCase : Any = do_lower_case
_lowerCAmelCase : int = remove_space
_lowerCAmelCase : List[str] = keep_accents
_lowerCAmelCase : Any = vocab_file
_lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@property
def __UpperCamelCase ( self ):
return len(self.sp_model )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : Union[str, Any] = self.__dict__.copy()
_lowerCAmelCase : Tuple = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self , snake_case_ ):
if self.remove_space:
_lowerCAmelCase : Optional[int] = """ """.join(inputs.strip().split() )
else:
_lowerCAmelCase : str = inputs
_lowerCAmelCase : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase : Union[str, Any] = unicodedata.normalize("""NFKD""" , snake_case_ )
_lowerCAmelCase : List[Any] = """""".join([c for c in outputs if not unicodedata.combining(snake_case_ )] )
if self.do_lower_case:
_lowerCAmelCase : Optional[int] = outputs.lower()
return outputs
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[int] = self.preprocess_text(snake_case_ )
_lowerCAmelCase : Any = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
_lowerCAmelCase : int = []
for piece in pieces:
if len(snake_case_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : Optional[Any] = cur_pieces[1:]
else:
_lowerCAmelCase : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case_ )
else:
new_pieces.append(snake_case_ )
return new_pieces
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.PieceToId(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.IdToPiece(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Any = """"""
_lowerCAmelCase : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Union[str, Any] = []
else:
current_sub_tokens.append(snake_case_ )
_lowerCAmelCase : str = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
    def __UpperCamelCase ( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def __UpperCamelCase ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def __UpperCamelCase ( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase : List[str] = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
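# Illustrative usage sketch (checkpoint name taken from the pretrained map
# above): the segment ids this tokenizer produces for a sentence pair.
if __name__ == "__main__":
    from transformers import AlbertTokenizer

    tok = AlbertTokenizer.from_pretrained("albert-base-v2")
    enc = tok("first sentence", "second sentence")
    print(enc["token_type_ids"])  # 0s over [CLS] A [SEP], 1s over B [SEP]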
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (_a ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
_lowerCAmelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
    def __UpperCamelCase ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def __UpperCamelCase ( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_lowerCAmelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[Dict, List]:
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> None:
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
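# Illustrative usage sketch (model id from the pretrained map above; the
# language pair is an assumption): tokenizing a source sentence for en->ro
# translation, which prepends the __en__ language token.
if __name__ == "__main__":
    from transformers import M2M100Tokenizer

    tok = M2M100Tokenizer.from_pretrained(
        "facebook/m2m100_418M", src_lang="en", tgt_lang="ro"
    )
    batch = tok("Hello world", return_tensors="pt")
    print(batch["input_ids"])  # first id is the __en__ language token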
def lowerCAmelCase__ ( a__: Dict , a__: Any ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = [0 for i in range(r + 1 )]
# nc0 = 1
_UpperCAmelCase = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
_UpperCAmelCase = min(a__ , a__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=1_0, r=5))
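# Self-contained sketch (an addition; names are local to this sketch) of the
# same space-optimized Pascal's-row idea, checked against math.comb:
if __name__ == "__main__":
    from math import comb

    def binom(n: int, r: int) -> int:
        c = [0] * (r + 1)
        c[0] = 1  # C(i, 0) == 1 for every row
        for i in range(1, n + 1):
            # update in place from the previous row, right to left
            for j in range(min(i, r), 0, -1):
                c[j] += c[j - 1]
        return c[r]

    assert binom(10, 5) == comb(10, 5) == 252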
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def lowerCAmelCase__ ( a__: Sequence[float] , a__: int , a__: int ) -> tuple[int | None, int | None, float]:
'''simple docstring'''
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
_UpperCAmelCase = (low + high) // 2
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = max_subarray(a__ , a__ , a__ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = max_subarray(a__ , mid + 1 , a__ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = max_cross_sum(a__ , a__ , a__ , a__ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def lowerCAmelCase__ ( a__: Sequence[float] , a__: int , a__: int , a__: int ) -> tuple[int, int, float]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = float('-inf' ), -1
_UpperCAmelCase , _UpperCAmelCase = float('-inf' ), -1
_UpperCAmelCase = 0
for i in range(a__ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_UpperCAmelCase = summ
_UpperCAmelCase = i
_UpperCAmelCase = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_UpperCAmelCase = summ
_UpperCAmelCase = i
return max_left, max_right, (left_sum + right_sum)
def lowerCAmelCase__ ( a__: int ) -> float:
'''simple docstring'''
_UpperCAmelCase = [randint(1 , a__ ) for _ in range(a__ )]
_UpperCAmelCase = time.time()
max_subarray(a__ , 0 , input_size - 1 )
_UpperCAmelCase = time.time()
return end - start
def lowerCAmelCase__ ( ) -> None:
'''simple docstring'''
_UpperCAmelCase = [1_0, 1_0_0, 1_0_0_0, 1_0_0_0_0, 5_0_0_0_0, 1_0_0_0_0_0, 2_0_0_0_0_0, 3_0_0_0_0_0, 4_0_0_0_0_0, 5_0_0_0_0_0]
_UpperCAmelCase = [time_max_subarray(a__ ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(a__ , a__ ):
print(a__ , '\t\t' , a__ )
plt.plot(a__ , a__ )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
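# Independent cross-check for the divide-and-conquer solver above: Kadane's
# O(n) scan over the CLRS example array, whose maximum subarray (indices
# 7..10) sums to 43. The helper name is illustrative only.
def _kadane_sketch(nums: list[int]) -> int:
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(x, cur + x)  # extend the current run or restart at x
        best = max(best, cur)
    return best
assert _kadane_sketch([13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]) == 43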
| 185
| 1
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_snake_case = open # noqa: we just need to have a builtin inside this module to test it properly
| 157
|
from math import sqrt
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : Union[str, Any] = 0
for i in range(1, int(sqrt(snake_case__ ) + 1 ) ):
if n % i == 0 and i != sqrt(snake_case__ ):
total += i + n // i
elif i == sqrt(snake_case__ ):
total += i
return total - n
def _UpperCamelCase ( snake_case__ = 1_0000 ) -> int:
__UpperCAmelCase : List[str] = sum(
i
for i in range(1, snake_case__ )
if sum_of_divisors(sum_of_divisors(snake_case__ ) ) == i and sum_of_divisors(snake_case__ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
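# Hand check for the proper-divisor helper above (assuming the deobfuscated
# name sum_of_divisors): 220 and 284 form the classic amicable pair, so each
# must map to the other for the Euler #21 solution to count them.
def _sum_proper_divisors(n: int) -> int:
    return sum(i for i in range(1, n) if n % i == 0)
assert _sum_proper_divisors(220) == 284 and _sum_proper_divisors(284) == 220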
| 157
| 1
|
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case : Union[str, Any] = ''
__snake_case : Any = ''
__snake_case : int = ''
__snake_case : int = ''
def _UpperCAmelCase ( _UpperCamelCase : str ) -> None:
# authorize twitter, initialize tweepy
A_ = tweepy.OAuthHandler(_UpperCamelCase, _UpperCamelCase )
auth.set_access_token(_UpperCamelCase, _UpperCamelCase )
A_ = tweepy.API(_UpperCamelCase )
# initialize a list to hold all the tweepy Tweets
A_ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
A_ = api.user_timeline(screen_name=_UpperCamelCase, count=2_00 )
# save most recent tweets
alltweets.extend(_UpperCamelCase )
# save the id of the oldest tweet less one
A_ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_UpperCamelCase ) > 0:
print(F'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
A_ = api.user_timeline(
screen_name=_UpperCamelCase, count=2_00, max_id=_UpperCamelCase )
# save most recent tweets
alltweets.extend(_UpperCamelCase )
# update the id of the oldest tweet less one
A_ = alltweets[-1].id - 1
print(F'''...{len(_UpperCamelCase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
A_ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''', '''w''' ) as f:
A_ = csv.writer(_UpperCamelCase )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(_UpperCamelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
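# A common alternative to the manual max_id loop above is tweepy's pagination
# helper; sketch only -- the api object and screen name are the placeholders
# from the script above, so this is not runnable without real credentials:
# for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#     print(tweet.id_str)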
| 18
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__snake_case : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
__snake_case : str = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__snake_case : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _UpperCAmelCase ( _UpperCamelCase : str ) -> int:
with open(_UpperCamelCase, '''rb''' ) as f:
A_ = Image.open(_UpperCamelCase )
return im.convert('''RGB''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the training data.'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the validation data.'} )
__lowercase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __A ( self ) -> int:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCamelCase )} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowercase : str = field(default=_UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def _UpperCAmelCase ( _UpperCamelCase : str ) -> Dict:
A_ = torch.stack([example['''pixel_values'''] for example in examples] )
A_ = torch.tensor([example['''labels'''] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def _UpperCAmelCase ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A_ ,A_ ,A_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A_ ,A_ ,A_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''', _UpperCamelCase, _UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A_ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
A_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                '''Use --overwrite_output_dir to overcome this.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
A_ = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='''image-classification''', use_auth_token=True if model_args.use_auth_token else None, )
else:
A_ = {}
if data_args.train_dir is not None:
A_ = os.path.join(data_args.train_dir, '''**''' )
if data_args.validation_dir is not None:
A_ = os.path.join(data_args.validation_dir, '''**''' )
A_ = load_dataset(
'''imagefolder''', data_files=_UpperCamelCase, cache_dir=model_args.cache_dir, task='''image-classification''', )
# If we don't have a validation split, split off a percentage of train as validation.
A_ = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, _UpperCamelCase ) and data_args.train_val_split > 0.0:
A_ = dataset['''train'''].train_test_split(data_args.train_val_split )
A_ = split['''train''']
A_ = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
A_ = dataset['''train'''].features['''labels'''].names
A_ ,A_ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
A_ = str(_UpperCamelCase )
A_ = label
# Load the accuracy metric from the datasets package
A_ = evaluate.load('''accuracy''' )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping metric names to floats.
def compute_metrics(_UpperCamelCase : Optional[Any] ):
return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
A_ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path, num_labels=len(_UpperCamelCase ), labelaid=_UpperCamelCase, idalabel=_UpperCamelCase, finetuning_task='''image-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
A_ = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=_UpperCamelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
A_ = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
A_ = image_processor.size['''shortest_edge''']
else:
A_ = (image_processor.size['''height'''], image_processor.size['''width'''])
A_ = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
A_ = Compose(
[
RandomResizedCrop(_UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
A_ = Compose(
[
Resize(_UpperCamelCase ),
CenterCrop(_UpperCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(_UpperCamelCase : Dict ):
A_ = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(_UpperCamelCase : Any ):
A_ = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
A_ = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_UpperCamelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
A_ = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_UpperCamelCase )
    # Initialize our trainer
A_ = Trainer(
model=_UpperCamelCase, args=_UpperCamelCase, train_dataset=dataset['''train'''] if training_args.do_train else None, eval_dataset=dataset['''validation'''] if training_args.do_eval else None, compute_metrics=_UpperCamelCase, tokenizer=_UpperCamelCase, data_collator=_UpperCamelCase, )
# Training
if training_args.do_train:
A_ = None
if training_args.resume_from_checkpoint is not None:
A_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A_ = last_checkpoint
A_ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics )
trainer.save_metrics('''train''', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A_ = trainer.evaluate()
trainer.log_metrics('''eval''', _UpperCamelCase )
trainer.save_metrics('''eval''', _UpperCamelCase )
# Write model card and (optionally) push to hub
A_ = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 18
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE__ = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE__ = {
'yjernite/retribert-base-uncased': 5_1_2,
}
SCREAMING_SNAKE_CASE__ = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class a_ ( lowerCamelCase ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = RetriBertTokenizer
lowercase = ["""input_ids""", """attention_mask"""]
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop("""type""" ) )
UpperCamelCase = do_lower_case
UpperCamelCase = strip_accents
UpperCamelCase = tokenize_chinese_chars
UpperCamelCase = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = do_lower_case
    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int:
        """simple docstring"""
        UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b is not None:
            output += token_ids_b + [self.sep_token_id]
        return output
    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
        """simple docstring"""
        UpperCamelCase = [self.sep_token_id]
        UpperCamelCase = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
UpperCamelCase = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
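# Runnable sketch of the pair-encoding logic above (deobfuscated): the layout
# [CLS] A [SEP] B [SEP] gets segment id 0 over the first part and 1 over the
# second. The helper name and token ids are illustrative.
def _segment_ids_sketch(a, b=None):
    if b is None:
        return [0] * (len(a) + 2)  # [CLS] + A + [SEP]
    return [0] * (len(a) + 2) + [1] * (len(b) + 1)  # ... + B + [SEP]
assert _segment_ids_sketch([7, 8], [9]) == [0, 0, 0, 0, 1, 1]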
| 321
|
'''simple docstring'''
def lowercase__ ( __UpperCamelCase = 4000000 )-> int:
UpperCamelCase = []
UpperCamelCase ,UpperCamelCase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__UpperCamelCase )
UpperCamelCase ,UpperCamelCase = b, a + b
return sum(__UpperCamelCase )
if __name__ == "__main__":
print(f'{solution() = }')
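# Independent check of the even-Fibonacci sum above: every third Fibonacci
# number is even, so the even terms alone obey e(k) = 4*e(k-1) + e(k-2) and
# the parity test can be dropped. The helper name is illustrative.
def _even_fib_sum_sketch(limit: int) -> int:
    total, a, b = 0, 2, 8  # the first two even Fibonacci numbers
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total
assert _even_fib_sum_sketch(4_000_000) == 4_613_732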
| 321
| 1
|
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case_ : list[int] , snake_case_ : int ):
snake_case__ : Any = 0
snake_case__ : Optional[int] = len(snake_case_ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
snake_case__ : int = i + 1
else:
snake_case__ : Dict = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 286
|
__lowerCamelCase : Optional[int] = """Tobias Carryer"""
from time import time
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[Any] , __A : List[Any] , __A : Optional[int] , __A : List[str] , __A : Dict=int(time() ) ): # noqa: B008
snake_case__ : List[Any] = multiplier
snake_case__ : Optional[int] = increment
snake_case__ : Optional[int] = modulo
snake_case__ : Union[str, Any] = seed
def _lowercase ( self : str ):
snake_case__ : Union[str, Any] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__lowerCamelCase : int = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
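# The demo above uses the Numerical Recipes constants a=1664525, c=1013904223
# and modulus 2 << 31 == 2**32. The same recurrence as a generator sketch
# (names are illustrative; slice it -- the stream never terminates):
from itertools import islice
def _lcg_stream(seed, a=1664525, c=1013904223, m=2**32):
    while True:
        seed = (a * seed + c) % m
        yield seed
assert list(islice(_lcg_stream(42), 3)) == list(islice(_lcg_stream(42), 3))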
| 286
| 1
|
import logging
from transformers.configuration_utils import PretrainedConfig
__A = logging.getLogger(__name__)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "masked_bert"
def __init__(self : Dict , UpperCAmelCase_ : Any=30_522 , UpperCAmelCase_ : List[Any]=768 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : Tuple=3_072 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=1E-1_2 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : str="topK" , UpperCAmelCase_ : List[str]="constant" , UpperCAmelCase_ : str=0.0 , **UpperCAmelCase_ : int , ) ->List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[int] =vocab_size
lowerCamelCase__: Dict =hidden_size
lowerCamelCase__: Optional[int] =num_hidden_layers
lowerCamelCase__: Any =num_attention_heads
lowerCamelCase__: List[Any] =hidden_act
lowerCamelCase__: str =intermediate_size
lowerCamelCase__: Dict =hidden_dropout_prob
lowerCamelCase__: str =attention_probs_dropout_prob
lowerCamelCase__: int =max_position_embeddings
lowerCamelCase__: Tuple =type_vocab_size
lowerCamelCase__: str =initializer_range
lowerCamelCase__: List[Any] =layer_norm_eps
lowerCamelCase__: str =pruning_method
lowerCamelCase__: Union[str, Any] =mask_init
lowerCamelCase__: Optional[Any] =mask_scale
| 10
|
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : List[str]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
UpperCAmelCase__ = 'cuda'
else:
UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = model.to(lowerCamelCase__ )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowerCamelCase__ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ = model.config.max_length - 1
else:
UpperCAmelCase__ = model.config.max_length
UpperCAmelCase__ = tokenizer(
lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
UpperCAmelCase__ = encodings['input_ids']
UpperCAmelCase__ = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ = []
UpperCAmelCase__ = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
UpperCAmelCase__ = min(start_index + batch_size ,len(lowerCamelCase__ ) )
UpperCAmelCase__ = encoded_texts[start_index:end_index]
UpperCAmelCase__ = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
UpperCAmelCase__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
UpperCAmelCase__ = encoded_batch
with torch.no_grad():
UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ = labels[..., 1:].contiguous()
UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
UpperCAmelCase__ = torch.expa(
(loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
| 98
| 0
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
snake_case__ = get_logger()
snake_case__ = None
class UpperCamelCase_ (TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
def __init__( self : Tuple , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , **_lowerCamelCase : Dict ):
"""simple docstring"""
super().__init__(features=_lowerCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
f'Expected {device} to be a `str` not {type(_lowerCamelCase )}, as `jaxlib.xla_extension.Device` '
                '''is not serializable with either `pickle` or `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''' )
A_ : List[str] = device if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(jax.devices()[0] )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle`
        # or `dill`, so the device mapping is cached in a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ : Dict = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'Device with string identifier {self.device} not listed among the available '
f'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
f'device: {str(jax.devices()[0] )}.' )
A_ : int = str(jax.devices()[0] )
A_ : Tuple = jnp_array_kwargs
@staticmethod
def _a ( ):
"""simple docstring"""
import jax
return {str(_lowerCamelCase ): device for device in jax.devices()}
def _a ( self : int , _lowerCamelCase : List[str] ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_lowerCamelCase , _lowerCamelCase ) and column:
if all(
isinstance(_lowerCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_lowerCamelCase , axis=0 )
return column
def _a ( self : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_lowerCamelCase , (str, bytes, type(_lowerCamelCase )) ):
return value
elif isinstance(_lowerCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
A_ : Optional[int] = {}
if isinstance(_lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
A_ : str = {'''dtype''': jnp.intaa}
else:
A_ : int = {'''dtype''': jnp.intaa}
elif isinstance(_lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
A_ : Optional[int] = {'''dtype''': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_lowerCamelCase , PIL.Image.Image ):
A_ : List[Any] = np.asarray(_lowerCamelCase )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle`
        # or `dill`, so the device mapping is cached in a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ : Tuple = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_lowerCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def _a ( self : List[Any] , _lowerCamelCase : List[str] ):
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_lowerCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_lowerCamelCase , '''__array__''' ) and not isinstance(_lowerCamelCase , jax.Array ):
A_ : Dict = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_lowerCamelCase , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(_lowerCamelCase ) for substruct in data_struct] )
elif isinstance(_lowerCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_lowerCamelCase ) for substruct in data_struct] )
return self._tensorize(_lowerCamelCase )
def _a ( self : Union[str, Any] , _lowerCamelCase : dict ):
"""simple docstring"""
return map_nested(self._recursive_tensorize , _lowerCamelCase , map_list=_lowerCamelCase )
def _a ( self : str , _lowerCamelCase : pa.Table ):
"""simple docstring"""
A_ : str = self.numpy_arrow_extractor().extract_row(_lowerCamelCase )
A_ : Optional[int] = self.python_features_decoder.decode_row(_lowerCamelCase )
return self.recursive_tensorize(_lowerCamelCase )
def _a ( self : int , _lowerCamelCase : pa.Table ):
"""simple docstring"""
A_ : List[str] = self.numpy_arrow_extractor().extract_column(_lowerCamelCase )
A_ : Tuple = self.python_features_decoder.decode_column(_lowerCamelCase , pa_table.column_names[0] )
A_ : Tuple = self.recursive_tensorize(_lowerCamelCase )
A_ : List[Any] = self._consolidate(_lowerCamelCase )
return column
def _a ( self : int , _lowerCamelCase : pa.Table ):
"""simple docstring"""
A_ : Optional[int] = self.numpy_arrow_extractor().extract_batch(_lowerCamelCase )
A_ : Optional[Any] = self.python_features_decoder.decode_batch(_lowerCamelCase )
A_ : str = self.recursive_tensorize(_lowerCamelCase )
for column_name in batch:
A_ : Union[str, Any] = self._consolidate(batch[column_name] )
return batch
| 4
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
_lowerCAmelCase = 'CIDAS/clipseg-rd64-refined'
_lowerCAmelCase = 'image_segmenter'
_lowerCAmelCase = CLIPSegForImageSegmentation
_lowerCAmelCase = ['image', 'text']
_lowerCAmelCase = ['image']
def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' )
def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
with torch.no_grad():
A_ : Optional[int] = self.model(**_lowerCamelCase ).logits
return logits
def _a ( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : int = outputs.cpu().detach().numpy()
A_ : Tuple = 0
A_ : List[str] = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
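# The decode step above thresholds logits into a binary mask and scales it to
# an 8-bit image; a NumPy sketch of the same idea (values are illustrative):
import numpy as np
logits = np.array([[-2.0, 0.5], [1.5, -0.1]])
mask = (logits > 0).astype(np.uint8) * 255  # 0 where logit <= 0, 255 elsewhere
assert mask.tolist() == [[0, 255], [255, 0]]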
| 4
| 1
|
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
A__ : Optional[int] = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)
A__ : Tuple = None
def UpperCAmelCase__ ( ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=UpperCAmelCase_ , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=UpperCAmelCase_ , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] ) -> Optional[Any]:
__lowerCamelCase : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase : Union[str, Any] = bool(qa['answers']['text'] )
return qid_to_has_ans
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Optional[int]:
def remove_articles(UpperCAmelCase_ : Optional[int] ):
return ARTICLES_REGEX.sub(' ' , UpperCAmelCase_ )
def white_space_fix(UpperCAmelCase_ : Dict ):
return " ".join(text.split() )
def remove_punc(UpperCAmelCase_ : Optional[int] ):
__lowerCamelCase : Dict = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCAmelCase_ : Union[str, Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase_ ) ) ) )
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Optional[int]:
if not s:
return []
return normalize_answer(UpperCAmelCase_ ).split()
def UpperCAmelCase__ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ) -> int:
return int(normalize_answer(UpperCAmelCase_ ) == normalize_answer(UpperCAmelCase_ ) )
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ) -> Optional[int]:
__lowerCamelCase : List[Any] = get_tokens(UpperCAmelCase_ )
__lowerCamelCase : Union[str, Any] = get_tokens(UpperCAmelCase_ )
__lowerCamelCase : List[str] = collections.Counter(UpperCAmelCase_ ) & collections.Counter(UpperCAmelCase_ )
__lowerCamelCase : List[str] = sum(common.values() )
if len(UpperCAmelCase_ ) == 0 or len(UpperCAmelCase_ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCamelCase : str = 1.0 * num_same / len(UpperCAmelCase_ )
__lowerCamelCase : Dict = 1.0 * num_same / len(UpperCAmelCase_ )
__lowerCamelCase : int = (2 * precision * recall) / (precision + recall)
return fa
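# Worked example for the token-level F1 above (deobfuscated names assumed):
# gold "the cat sat" vs. prediction "the cat" share 2 tokens, giving
# precision = 2/2, recall = 2/3 and F1 = 0.8.
from collections import Counter
def _token_f1_sketch(gold: str, pred: str) -> float:
    g, p = gold.split(), pred.split()
    same = sum((Counter(g) & Counter(p)).values())
    if same == 0:
        return 0.0
    precision, recall = same / len(p), same / len(g)
    return 2 * precision * recall / (precision + recall)
assert abs(_token_f1_sketch("the cat sat", "the cat") - 0.8) < 1e-9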
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) -> Tuple:
__lowerCamelCase : Optional[int] = {}
__lowerCamelCase : List[Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase : Union[str, Any] = qa['id']
__lowerCamelCase : List[str] = [t for t in qa['answers']['text'] if normalize_answer(UpperCAmelCase_ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowerCamelCase : Optional[int] = ['']
if qid not in preds:
print(F'Missing prediction for {qid}' )
continue
__lowerCamelCase : str = preds[qid]
# Take max over all gold answers
__lowerCamelCase : Optional[Any] = max(compute_exact(UpperCAmelCase_ , UpperCAmelCase_ ) for a in gold_answers )
__lowerCamelCase : str = max(compute_fa(UpperCAmelCase_ , UpperCAmelCase_ ) for a in gold_answers )
return exact_scores, fa_scores
def UpperCAmelCase__ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ) -> Tuple:
__lowerCamelCase : str = {}
for qid, s in scores.items():
__lowerCamelCase : Union[str, Any] = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCamelCase : int = float(not qid_to_has_ans[qid] )
else:
__lowerCamelCase : List[str] = s
return new_scores
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str=None ) -> int:
if not qid_list:
__lowerCamelCase : List[str] = len(UpperCAmelCase_ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores.values() ) / total),
('f1', 100.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
__lowerCamelCase : List[Any] = len(UpperCAmelCase_ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any ) -> Union[str, Any]:
for k in new_eval:
__lowerCamelCase : int = new_eval[k]
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Optional[Any]:
plt.step(UpperCAmelCase_ , UpperCAmelCase_ , color='b' , alpha=0.2 , where='post' )
plt.fill_between(UpperCAmelCase_ , UpperCAmelCase_ , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(UpperCAmelCase_ )
plt.savefig(UpperCAmelCase_ )
plt.clf()
def UpperCAmelCase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Dict=None ) -> Dict:
__lowerCamelCase : int = sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : na_probs[k] )
__lowerCamelCase : List[Any] = 0.0
__lowerCamelCase : Optional[int] = 1.0
__lowerCamelCase : List[Any] = 0.0
__lowerCamelCase : Optional[int] = [1.0]
__lowerCamelCase : Optional[int] = [0.0]
__lowerCamelCase : Union[str, Any] = 0.0
for i, qid in enumerate(UpperCAmelCase_ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCamelCase : List[Any] = true_pos / float(i + 1 )
__lowerCamelCase : str = true_pos / float(UpperCAmelCase_ )
if i == len(UpperCAmelCase_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(UpperCAmelCase_ )
recalls.append(UpperCAmelCase_ )
if out_image:
plot_pr_curve(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return {"ap": 100.0 * avg_prec}
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] ) -> Any:
if out_image_dir and not os.path.exists(UpperCAmelCase_ ):
os.makedirs(UpperCAmelCase_ )
__lowerCamelCase : str = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCamelCase : Optional[Any] = make_precision_recall_eval(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , out_image=os.path.join(UpperCAmelCase_ , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
__lowerCamelCase : Optional[Any] = make_precision_recall_eval(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , out_image=os.path.join(UpperCAmelCase_ , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
__lowerCamelCase : Optional[Any] = {k: float(UpperCAmelCase_ ) for k, v in qid_to_has_ans.items()}
__lowerCamelCase : Tuple = make_precision_recall_eval(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , out_image=os.path.join(UpperCAmelCase_ , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(UpperCAmelCase_ , UpperCAmelCase_ , 'pr_exact' )
merge_eval(UpperCAmelCase_ , UpperCAmelCase_ , 'pr_f1' )
merge_eval(UpperCAmelCase_ , UpperCAmelCase_ , 'pr_oracle' )
def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] ) -> Tuple:
if not qid_list:
return
__lowerCamelCase : List[Any] = [na_probs[k] for k in qid_list]
__lowerCamelCase : Union[str, Any] = np.ones_like(UpperCAmelCase_ ) / float(len(UpperCAmelCase_ ) )
plt.hist(UpperCAmelCase_ , weights=UpperCAmelCase_ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(F'Histogram of no-answer probability: {name}' )
plt.savefig(os.path.join(UpperCAmelCase_ , F'na_prob_hist_{name}.png' ) )
plt.clf()
def UpperCAmelCase__ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] ) -> Optional[Any]:
__lowerCamelCase : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCamelCase : Tuple = num_no_ans
__lowerCamelCase : Tuple = cur_score
__lowerCamelCase : int = 0.0
__lowerCamelCase : List[str] = sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : na_probs[k] )
for i, qid in enumerate(UpperCAmelCase_ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCamelCase : Optional[int] = scores[qid]
else:
if preds[qid]:
__lowerCamelCase : str = -1
else:
__lowerCamelCase : Any = 0
cur_score += diff
if cur_score > best_score:
__lowerCamelCase : int = cur_score
__lowerCamelCase : Union[str, Any] = na_probs[qid]
return 100.0 * best_score / len(UpperCAmelCase_ ), best_thresh
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] ) -> int:
__lowerCamelCase , __lowerCamelCase : int = find_best_thresh(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase , __lowerCamelCase : List[Any] = find_best_thresh(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase : str = best_exact
__lowerCamelCase : List[str] = exact_thresh
__lowerCamelCase : Tuple = best_fa
__lowerCamelCase : int = fa_thresh
def UpperCAmelCase__ ( ) -> List[str]:
with open(OPTS.data_file ) as f:
__lowerCamelCase : Tuple = json.load(UpperCAmelCase_ )
__lowerCamelCase : Any = dataset_json['data']
with open(OPTS.pred_file ) as f:
__lowerCamelCase : Optional[Any] = json.load(UpperCAmelCase_ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowerCamelCase : Optional[int] = json.load(UpperCAmelCase_ )
else:
__lowerCamelCase : Union[str, Any] = {k: 0.0 for k in preds}
__lowerCamelCase : Optional[int] = make_qid_to_has_ans(UpperCAmelCase_ ) # maps qid to True/False
__lowerCamelCase : List[Any] = [k for k, v in qid_to_has_ans.items() if v]
__lowerCamelCase : Tuple = [k for k, v in qid_to_has_ans.items() if not v]
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = get_raw_scores(UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase : Any = apply_no_ans_threshold(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , OPTS.na_prob_thresh )
__lowerCamelCase : Tuple = apply_no_ans_threshold(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , OPTS.na_prob_thresh )
__lowerCamelCase : Optional[Any] = make_eval_dict(UpperCAmelCase_ , UpperCAmelCase_ )
if has_ans_qids:
__lowerCamelCase : int = make_eval_dict(UpperCAmelCase_ , UpperCAmelCase_ , qid_list=UpperCAmelCase_ )
merge_eval(UpperCAmelCase_ , UpperCAmelCase_ , 'HasAns' )
if no_ans_qids:
__lowerCamelCase : List[str] = make_eval_dict(UpperCAmelCase_ , UpperCAmelCase_ , qid_list=UpperCAmelCase_ )
merge_eval(UpperCAmelCase_ , UpperCAmelCase_ , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , OPTS.out_image_dir )
histogram_na_prob(UpperCAmelCase_ , UpperCAmelCase_ , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(UpperCAmelCase_ , UpperCAmelCase_ , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
else:
print(json.dumps(UpperCAmelCase_ , indent=2 ) )
if __name__ == "__main__":
A__ : Optional[Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 185
|
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
A__ : List[str] = get_logger(__name__)
A__ : str = R"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class UpperCAmelCase_ :
"""simple docstring"""
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class UpperCAmelCase_ :
"""simple docstring"""
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray:
for processor in self:
__lowerCamelCase : str = inspect.signature(processor.__call__ ).parameters
if len(SCREAMING_SNAKE_CASE_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'Make sure that all the required parameters: {list(function_args.keys() )} for '
f'{processor.__class__} are passed to the logits processor.' )
__lowerCamelCase : Tuple = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
else:
__lowerCamelCase : int = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return scores
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not (temperature > 0):
raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}' )
__lowerCamelCase : Optional[int] = temperature
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray:
__lowerCamelCase : Dict = scores / self.temperature
return scores
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -float('Inf' ) , SCREAMING_SNAKE_CASE_ = 1 ) -> Union[str, Any]:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}' )
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or (min_tokens_to_keep < 1):
raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' )
__lowerCamelCase : str = top_p
__lowerCamelCase : Tuple = filter_value
__lowerCamelCase : Tuple = min_tokens_to_keep
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray:
__lowerCamelCase , __lowerCamelCase : Any = lax.top_k(SCREAMING_SNAKE_CASE_ , scores.shape[-1] )
__lowerCamelCase : int = jnp.full_like(SCREAMING_SNAKE_CASE_ , self.filter_value )
__lowerCamelCase : Tuple = jax.nn.softmax(SCREAMING_SNAKE_CASE_ , axis=-1 ).cumsum(axis=-1 )
__lowerCamelCase : List[str] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__lowerCamelCase : Tuple = jnp.roll(SCREAMING_SNAKE_CASE_ , 1 )
score_mask |= score_mask.at[:, 0].set(SCREAMING_SNAKE_CASE_ )
# min tokens to keep
__lowerCamelCase : Any = score_mask.at[:, : self.min_tokens_to_keep].set(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = jnp.where(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = jax.lax.sort_key_val(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[-1]
return next_scores
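# Worked nucleus (top-p) example mirroring the mask construction above, in
# NumPy (the probabilities are illustrative and already sorted descending):
import numpy as np
probs = np.array([0.5, 0.3, 0.15, 0.05])
keep = np.cumsum(probs) < 0.9  # [True, True, False, False]
keep = np.roll(keep, 1)        # shift so the token crossing top_p survives too
keep[0] = True                 # the highest-probability token is always kept
assert keep.tolist() == [True, True, True, False]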
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -float('Inf' ) , SCREAMING_SNAKE_CASE_ = 1 ) -> str:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or top_k <= 0:
raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}' )
__lowerCamelCase : List[str] = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = filter_value
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray:
__lowerCamelCase , __lowerCamelCase : List[Any] = scores.shape
__lowerCamelCase : Tuple = jnp.full(batch_size * vocab_size , self.filter_value )
__lowerCamelCase : int = min(self.top_k , scores.shape[-1] ) # Safety check
__lowerCamelCase , __lowerCamelCase : Tuple = lax.top_k(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = jnp.broadcast_to((jnp.arange(SCREAMING_SNAKE_CASE_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__lowerCamelCase : List[Any] = topk_scores.flatten()
__lowerCamelCase : Union[str, Any] = topk_indices.flatten() + shift
__lowerCamelCase : Tuple = next_scores_flat.at[topk_indices_flat].set(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = next_scores_flat.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return next_scores
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
__lowerCamelCase : Any = bos_token_id
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray:
__lowerCamelCase : Optional[Any] = jnp.full(scores.shape , -float('inf' ) )
__lowerCamelCase : Optional[Any] = 1 - jnp.bool_(cur_len - 1 )
__lowerCamelCase : List[Any] = jnp.where(SCREAMING_SNAKE_CASE_ , new_scores.at[:, self.bos_token_id].set(0 ) , SCREAMING_SNAKE_CASE_ )
return scores
class FlaxForcedEOSTokenLogitsProcessor(_UpperCAmelCase):
    """Forces `eos_token_id` to be sampled once `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(_UpperCAmelCase):
    """Suppresses `eos_token_id` until at least `min_length` tokens have been generated."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(_UpperCAmelCase):
    """Suppresses `begin_suppress_tokens` as soon as generation reaches `begin_index`."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensLogitsProcessor(_UpperCAmelCase):
    """Sets the logits of all `suppress_tokens` to `-inf` at every generation step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(_UpperCAmelCase):
    """Forces specific token ids at specific generation indices, as given by `force_token_map`."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(_UpperCAmelCase):
    """Keeps Whisper's generated timestamp tokens paired and monotonically increasing."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),  # must be a non-timestamp token
                    scores_k.at[: self.eos_token_id].set(-float("inf")),  # cannot be a normal text token
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
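# Illustrative usage (added; not part of the upstream module). It exercises the
# top-k warper defined above on made-up logits; these warpers ignore `input_ids`
# and `cur_len`, so placeholder values are fine.
if __name__ == "__main__":
    demo_scores = jnp.array([[1.0, 4.0, 2.0, 3.0]])
    demo_warper = FlaxTopKLogitsWarper(top_k=2)
    # Everything outside the two largest logits is replaced by the filter value (-inf).
    print(demo_warper(None, demo_scores, cur_len=1))  # [[-inf  4. -inf  3.]]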
"""simple docstring"""
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Union[str, Any]:
_lowerCAmelCase =data
_lowerCAmelCase =previous
_lowerCAmelCase =next_node
def __str__( self ) -> str:
return f'''{self.data}'''
def _lowerCAmelCase ( self ) -> int:
return self.data
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return self.next
def _lowerCAmelCase ( self ) -> Dict:
return self.previous
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase =head
def __iter__( self ) -> Union[str, Any]:
return self
def _lowerCAmelCase ( self ) -> List[Any]:
if not self.current:
raise StopIteration
else:
_lowerCAmelCase =self.current.get_data()
_lowerCAmelCase =self.current.get_next()
return value
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> Tuple:
_lowerCAmelCase =None # First node in list
_lowerCAmelCase =None # Last node in list
def __str__( self ) -> Union[str, Any]:
_lowerCAmelCase =self.head
_lowerCAmelCase =[]
while current is not None:
nodes.append(current.get_data() )
_lowerCAmelCase =current.get_next()
return " ".join(str(__UpperCAmelCase ) for node in nodes )
def __contains__( self , __UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase =self.head
while current:
if current.get_data() == value:
return True
_lowerCAmelCase =current.get_next()
return False
def __iter__( self ) -> int:
return LinkedListIterator(self.head )
def _lowerCAmelCase ( self ) -> Optional[int]:
if self.head:
return self.head.get_data()
return None
def _lowerCAmelCase ( self ) -> int:
if self.tail:
return self.tail.get_data()
return None
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None:
if self.head is None:
_lowerCAmelCase =node
_lowerCAmelCase =node
else:
self.insert_before_node(self.head , __UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None:
if self.head is None:
self.set_head(__UpperCAmelCase )
else:
self.insert_after_node(self.tail , __UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None:
_lowerCAmelCase =Node(__UpperCAmelCase )
if self.head is None:
self.set_head(__UpperCAmelCase )
else:
self.set_tail(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
_lowerCAmelCase =node
_lowerCAmelCase =node.previous
if node.get_previous() is None:
_lowerCAmelCase =node_to_insert
else:
_lowerCAmelCase =node_to_insert
_lowerCAmelCase =node_to_insert
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
_lowerCAmelCase =node
_lowerCAmelCase =node.next
if node.get_next() is None:
_lowerCAmelCase =node_to_insert
else:
_lowerCAmelCase =node_to_insert
_lowerCAmelCase =node_to_insert
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
_lowerCAmelCase =1
_lowerCAmelCase =Node(__UpperCAmelCase )
_lowerCAmelCase =self.head
while node:
if current_position == position:
self.insert_before_node(__UpperCAmelCase , __UpperCAmelCase )
return
current_position += 1
_lowerCAmelCase =node.next
self.insert_after_node(self.tail , __UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Node:
_lowerCAmelCase =self.head
while node:
if node.get_data() == item:
return node
_lowerCAmelCase =node.get_next()
raise Exception("""Node not found""" )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict:
if (node := self.get_node(__UpperCAmelCase )) is not None:
if node == self.head:
_lowerCAmelCase =self.head.get_next()
if node == self.tail:
_lowerCAmelCase =self.tail.get_previous()
self.remove_node_pointers(__UpperCAmelCase )
@staticmethod
def _lowerCAmelCase ( __UpperCAmelCase ) -> None:
if node.get_next():
_lowerCAmelCase =node.previous
if node.get_previous():
_lowerCAmelCase =node.next
_lowerCAmelCase =None
_lowerCAmelCase =None
def _lowerCAmelCase ( self ) -> Optional[Any]:
return self.head is None
def _lowerCamelCase() -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
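# Added demonstration (not part of the original file): exercising the list API above.
if __name__ == "__main__":
    demo_list = LinkedList()
    for item in (1, 2, 3):
        demo_list.insert(item)
    print(demo_list)           # 1 2 3
    print(2 in demo_list)      # True
    demo_list.delete_value(2)
    print(demo_list)           # 1 3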
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
lowerCamelCase = 1
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
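# Usage sketch (added; not in the original file): `copy()` deep-copies every field,
# so mutating the copy leaves the original configuration untouched.
if __name__ == "__main__":
    base_config = DownloadConfig(force_download=True)
    derived_config = base_config.copy()
    derived_config.force_download = False
    assert base_config.force_download is True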
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def str_eval(s: str) -> int:
    """Return the product of the digits in the given string."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number `n` with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
def binary_recursive(decimal: int) -> str:
    """Recursively convert a non-negative integer to its binary digit string."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input, then return its binary form prefixed with "0b" or "-0b"."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
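# Added sanity checks (not in the original file): expected conversions.
if __name__ == "__main__":
    assert main("57") == "0b111001"
    assert main("-57") == "-0b111001"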
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shapes in TensorFlow cleanly: return static dims where known."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    # `64512` stays safely below HDF5's ~64 KB object header limit.
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
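# Added sketch (not part of the upstream module): `shape_list` resolves static
# dimensions where known, and `flatten` mirrors `torch.flatten` semantics in TF.
if __name__ == "__main__":
    demo_tensor = tf.zeros((2, 3, 4))
    print(shape_list(demo_tensor))                  # [2, 3, 4]
    print(flatten(demo_tensor, start_dim=1).shape)  # (2, 12)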
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve: yields the primes 2, 3, 5, ... indefinitely."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
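# Added check (not in the original): the incremental sieve yields primes in order.
if __name__ == "__main__":
    from itertools import islice

    assert list(islice(sieve(), 5)) == [2, 3, 5, 7, 11]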
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCamelCase_ : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Tuple = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowerCamelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
A_ : str = False
if num < 0:
A_ : Dict = True
A_ : Union[str, Any] = -num
A_ : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_UpperCAmelCase ) for e in binary )
return "0b" + "".join(str(_UpperCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
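# Added examples (not in the original; `decimal_to_binary` is the name chosen here
# when restoring the helper above):
if __name__ == "__main__":
    assert decimal_to_binary(10) == "0b1010"
    assert decimal_to_binary(-10) == "-0b1010"
    assert decimal_to_binary(0) == "0b0"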
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class __snake_case ( __lowerCAmelCase ):
def __init__( self , *lowercase , **lowercase) -> Optional[Any]:
'''simple docstring'''
super().__init__(*lowercase , **lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__: Tuple = deque([]) if self.values[key] is None else self.values[key]
self.values[key].appendleft(lowercase)
a__: int = self.values[key]
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return (
sum(self.charge_factor - len(lowercase) for slot in self.values)
/ self.size_table
* self.charge_factor
)
def lowerCamelCase_ ( self , lowercase , lowercase=None) -> Union[str, Any]:
'''simple docstring'''
if not (
len(self.values[key]) == self.charge_factor and self.values.count(lowercase) == 0
):
return key
return super()._collision_resolution(lowercase , lowercase)
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowercase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
lowercase__ = {
'unc-nlp/lxmert-base-uncased': 512,
}
lowercase__ = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class __snake_case ( __lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_INIT_CONFIGURATION
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = LxmertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
a__: Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase' , lowercase) != do_lower_case
or normalizer_state.get('strip_accents' , lowercase) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowercase) != tokenize_chinese_chars
):
a__: int = getattr(lowercase , normalizer_state.pop('type'))
a__: Dict = do_lower_case
a__: Dict = strip_accents
a__: Optional[int] = tokenize_chinese_chars
a__: List[Any] = normalizer_class(**lowercase)
a__: Optional[int] = do_lower_case
def lowerCamelCase_ ( self , lowercase , lowercase=None) -> Tuple:
'''simple docstring'''
a__: Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__: List[Any] = [self.sep_token_id]
a__: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
a__: List[Any] = self._tokenizer.model.save(lowercase , name=lowercase)
return tuple(lowercase)
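# Added sketch (not part of the original file): encoding a sentence pair.
# Requires downloading the `unc-nlp/lxmert-base-uncased` files referenced above.
if __name__ == "__main__":
    tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    encoded = tokenizer("who is driving?", "a man in a red shirt")
    # token_type_ids are 0 for the first segment (incl. [CLS]/[SEP]) and 1 for the second
    print(encoded.token_type_ids)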
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
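# Added sketch (not part of the original module): this formatter is what backs
# `Dataset.with_format("jax")` in the public `datasets` API.
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
    print(type(ds[0]["x"]))  # a jax.Array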
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config


def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)


def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param


def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict


def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)


####################################################################################################

if __name__ == "__main__":
    main()

####################################################################################################
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register the error to raise for a formatter whose dependency is missing."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory that instantiates a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters (up to `max_perimeter`) of almost-equilateral triangles with integral area."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
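# Added sketch (not part of the original file): a typical call. The checkpoint name
# is an assumption for illustration; any BLIP checkpoint with this processor works.
if __name__ == "__main__":
    from PIL import Image

    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    inputs = processor(images=Image.new("RGB", (224, 224)), text="a photo", return_tensors="pt")
    print(list(inputs.keys()))  # e.g. ['pixel_values', 'input_ids', 'attention_mask']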
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    batch_params = ["image"]
    params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1)
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224)
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (0.1, 0.1, 0.1),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0)
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array([0.00039216] * 9)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference)
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np").images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler")
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def solution(n: int = 1000) -> int:
    """
    Return the largest product a * b * c of a Pythagorean triplet (a, b, c)
    with a + b + c == n, or -1 if no such triplet exists.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"{solution() = }")
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
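
# Added usage sketch (not in the original file): composing a Blip2Config from
# sub-configs via `from_vision_qformer_text_configs`. The tiny layer counts are
# illustrative values, not a reference configuration.
if __name__ == "__main__":
    vision_config = Blip2VisionConfig(num_hidden_layers=2)
    qformer_config = Blip2QFormerConfig(num_hidden_layers=2)
    text_config = CONFIG_MAPPING["opt"](num_hidden_layers=2)
    config = Blip2Config.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
    # The Q-Former is wired to cross-attend into the vision encoder:
    print(config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size)  # True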
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ])
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__snake_case = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
__snake_case = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
__snake_case = """▁"""
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by wrapping one or two sequences in <s>...</s> special tokens."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """BARThez, like RoBERTa, does not use token type ids, so this returns a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
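
# --- Added usage sketch (not part of the original module) --------------------
# Checkpoint name taken from the pretrained map above; network access required:
#
#     tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Le camembert est délicieux.")["input_ids"]
#     # A single sequence is wrapped as `<s> A </s>` by
#     # build_inputs_with_special_tokens; a pair becomes `<s> A </s></s> B </s>`.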
"""simple docstring"""
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of shards implied by the list-valued entries of `gen_kwargs`."""
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ))
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Spread `num_shards` contiguous shard indices as evenly as possible over at most `max_num_jobs` groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split `gen_kwargs` into at most `max_num_jobs` dicts, slicing every list value."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of `_split_gen_kwargs`: concatenate list values, keep the first copy of everything else."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a copy of `gen_kwargs` whose list values are shuffled; same-length lists share one permutation."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
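
# Added demonstration (not in the original module) of how list-valued
# gen_kwargs are split across jobs and merged back:
if __name__ == "__main__":
    gen_kwargs = {"files": [f"shard_{i}.txt" for i in range(5)], "split": "train"}
    jobs = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
    # 5 shards over 2 jobs -> 3 files for the first job, 2 for the second;
    # the non-list value "split" is copied into every job.
    assert [len(job["files"]) for job in jobs] == [3, 2]
    assert _merge_gen_kwargs(jobs) == gen_kwargs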
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide `number_of_bytes` into `partitions` contiguous, 1-indexed byte ranges.

    >>> allocation_num(16647, 4)
    ['1-4161', '4162-8322', '8323-12483', '12484-16647']
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")

        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor)
def _lowerCAmelCase ( self , _a = "auto" ):
"""simple docstring"""
if slice_size == "auto":
lowerCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.enable_attention_slicing(_a )
    @torch.no_grad()
    def __call__(self, audio, sampling_rate=16_000, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        # Transcribe the audio with Whisper; the transcription becomes the prompt.
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt")
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
'''Bi-directional Dijkstra: search forward from the source and backward from the destination simultaneously.'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(graph: dict, v: str, visited_forward: set, visited_backward: set, cst_fwd: dict, cst_bwd: dict, queue: PriorityQueue, parent: dict, shortest_distance: float) -> float:
    """Relax all edges out of `v`, updating the frontier queue and the best crossing distance."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """
    Return the shortest distance between `source` and `destination`, or -1 if unreachable.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance)
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance)
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
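
# Added demonstration (not in the original file): in the graphs above, the
# shortest E -> F distance is 3 via E -> G -> F (E -> B -> C -> D -> F costs 4).
if __name__ == "__main__":
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3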
'''Processor class for Donut, pairing an auto image processor with an auto tokenizer.'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("""images""", None)
        text = kwargs.pop("""text""", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["""input_ids"""]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your images inputs, or in a separate call.""")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a sequence of <s_key>...</s_key> tokens into an ordinary (possibly nested) dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(R"""<s_(.*?)>""", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(Rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, """""")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R"""<sep/>"""):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=is_inner_value, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
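

# A minimal sketch of `token2json` on a Donut-style generated sequence (`processor` stands for
# a hypothetical instance of the class above; the tag names are illustrative):
#
#     seq = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
#     processor.token2json(seq)
#     # -> {"menu": {"name": "Latte", "price": "4.50"}}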
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime, using the Lucas-Lehmer test."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1  # the Mersenne number 2**p - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
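    # Example: enumerate the small Mersenne prime exponents (p such that 2**p - 1 is prime);
    # below 32 these are 2, 3, 5, 7, 13, 17, 19 and 31.
    print([p for p in range(2, 32) if lucas_lehmer_test(p)])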
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    # The class and field names here are a reconstruction following `datasets.DownloadConfig`,
    # which this definition mirrors; only the default values were preserved verbatim.
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    download_desc: Optional[str] = None
    storage_options: Optional[Dict] = None
    def copy(self) -> "DownloadConfig":
        """Return a fresh copy of this config, deep-copying every field value."""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
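

# A quick illustration of `copy` above: mutable fields are deep-copied, so the clone can be
# mutated independently of the original (illustrative; not executed on import):
#
#     config = DownloadConfig(proxies={"https": "http://localhost:3128"})
#     clone = config.copy()
#     assert clone.proxies == config.proxies and clone.proxies is not config.proxies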
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
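    # Note on the expected pieces above: with merges ["l o", "lo w</w>", "e r</w>"], "lower"
    # becomes ["lo", "w", "er</w>"] (the "lo w</w>" merge does not fire because the trailing "r"
    # means "w" is not word-final) and "newer" becomes ["n", "e", "w", "er</w>"].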
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
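

# Minimal usage sketch of the tokenizer under test (assumes network access to the public
# "openai/clip-vit-base-patch32" checkpoint; not part of the unit tests above):
#
#     from transformers import CLIPTokenizer
#     tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#     tokenizer("a photo of a cat")["input_ids"]  # wrapped in <|startoftext|> ... <|endoftext|>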
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find scores of each token being the start and end token of an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
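

if __name__ == "__main__":
    # A minimal, self-contained sketch (illustrative only: random tensors stand in for real
    # BERT embeddings) of the scoring rule used in `forward` above. Each query token is scored
    # against the support set's entity-start embeddings, then normalized over query positions.
    q_i = torch.randn(30, 768)  # token embeddings for one query example
    s_start = torch.randn(5, 768)  # support embeddings taken at entity-start markers
    p_start = torch.matmul(q_i, s_start.T).sum(1).softmax(0)
    print(p_start.shape)  # torch.Size([30])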
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase__ ( _UpperCAmelCase , unittest.TestCase ):
A__ : Optional[int] =TextToVideoSDPipeline
A__ : int =TEXT_TO_IMAGE_PARAMS
A__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
A__ : Union[str, Any] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def A_ ( self : Optional[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def A_ ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict=0 ):
if str(__lowerCAmelCase ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = TextToVideoSDPipeline(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = 'np'
SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowerCAmelCase ).frames
SCREAMING_SNAKE_CASE__ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A_ ( self : Union[str, Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowerCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def A_ ( self : Optional[Any] ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def A_ ( self : List[Any] ):
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def A_ ( self : List[str] ):
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def A_ ( self : List[Any] ):
pass
def A_ ( self : str ):
return super().test_progress_bar()
@slow
@skip_mps
class lowercase__ ( unittest.TestCase ):
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
SCREAMING_SNAKE_CASE__ = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
SCREAMING_SNAKE_CASE__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ = pipe.to('cuda' )
SCREAMING_SNAKE_CASE__ = 'Spiderman is surfing'
SCREAMING_SNAKE_CASE__ = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=25 , output_type='pt' ).frames
SCREAMING_SNAKE_CASE__ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
SCREAMING_SNAKE_CASE__ = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
SCREAMING_SNAKE_CASE__ = pipe.to('cuda' )
SCREAMING_SNAKE_CASE__ = 'Spiderman is surfing'
SCREAMING_SNAKE_CASE__ = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=2 , output_type='pt' ).frames
SCREAMING_SNAKE_CASE__ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__snake_case = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
__snake_case = get_tests_dir("""fixtures/vocab.json""")
__snake_case = get_tests_dir("""fixtures""")
class lowercase__ ( unittest.TestCase ):
A__ : List[Any] =["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = 0
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) )
copyfile(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , 'vocab.json' ) )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
# save in new folder
processor.save_pretrained(UpperCAmelCase_ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'r' ) as f:
SCREAMING_SNAKE_CASE__ = json.load(UpperCAmelCase_ )
config_dict.pop('processor_class' )
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'w' ) as f:
f.write(json.dumps(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
# save in new folder
processor.save_pretrained(UpperCAmelCase_ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'r' ) as f:
SCREAMING_SNAKE_CASE__ = json.load(UpperCAmelCase_ )
config_dict.pop('processor_class' )
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'w' ) as f:
f.write(json.dumps(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(UpperCAmelCase_ )
# copy relevant files
copyfile(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , 'vocab.json' ) )
# create emtpy sample processor
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'w' ) as f:
f.write('{}' )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Optional[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=UpperCAmelCase_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
SCREAMING_SNAKE_CASE__ = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
SCREAMING_SNAKE_CASE__ = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=UpperCAmelCase_ , use_fast=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def A_ ( self : Union[str, Any] ):
try:
AutoConfig.register('custom' , UpperCAmelCase_ )
AutoFeatureExtractor.register(UpperCAmelCase_ , UpperCAmelCase_ )
AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
AutoProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ = CustomFeatureExtractor.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCAmelCase_ , 'vocab.txt' )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = CustomTokenizer(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = CustomProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Tuple ):
class lowercase__ ( _UpperCAmelCase ):
A__ : Optional[int] =False
class lowercase__ ( _UpperCAmelCase ):
A__ : Optional[int] =False
class lowercase__ ( _UpperCAmelCase ):
A__ : Dict ="""AutoFeatureExtractor"""
A__ : Optional[int] ="""AutoTokenizer"""
A__ : str =False
try:
AutoConfig.register('custom' , UpperCAmelCase_ )
AutoFeatureExtractor.register(UpperCAmelCase_ , UpperCAmelCase_ )
AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
AutoProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class lowercase__ ( unittest.TestCase ):
A__ : List[Any] =["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def A_ ( cls : str ):
SCREAMING_SNAKE_CASE__ = TOKEN
HfFolder.save_token(UpperCAmelCase_ )
@classmethod
def A_ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-processor' )
except HTTPError:
pass
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCAmelCase_ , 'test-processor' ) , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(new_processor.feature_extractor , UpperCAmelCase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Dict ):
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCAmelCase_ , 'test-processor-org' ) , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token , organization='valid_org' , )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(new_processor.feature_extractor , UpperCAmelCase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Optional[Any] ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ = CustomFeatureExtractor.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCAmelCase_ , 'vocab.txt' )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = CustomTokenizer(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = CustomProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
SCREAMING_SNAKE_CASE__ = Repository(UpperCAmelCase_ , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(UpperCAmelCase_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCAmelCase_ , 'tokenizer_config.json' ) ) as f:
SCREAMING_SNAKE_CASE__ = json.load(UpperCAmelCase_ )
self.assertDictEqual(
tokenizer_config['auto_map'] , {
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , 'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , 'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , 'custom_processing.py' ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=UpperCAmelCase_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
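

# The registration pattern exercised by the tests above, outside of a test harness (a minimal
# sketch; `CustomConfig`, `CustomFeatureExtractor`, `CustomTokenizer` and `CustomProcessor` are
# the fixture classes imported at the top of this file):
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#     AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#     AutoProcessor.register(CustomConfig, CustomProcessor)
#     processor = AutoProcessor.from_pretrained("path/to/checkpoint-using-CustomConfig")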
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
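

# Illustrative mapping (not executed): rename_keys turns an original checkpoint key such as
# "module.encoder.patch_embed1.proj.weight" into "glpn.encoder.patch_embeddings.0.proj.weight".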
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Fetch a test image of two cats, on which the converted model will be verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original GLPN checkpoint's weights to our GLPN structure."""
    # define GLPN configuration (Transformer encoder + decoder)
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
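    # Example invocation (paths are placeholders):
    #   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
    #       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti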
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words
    that will follow a specified text prompt.
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts, as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
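

if __name__ == "__main__":
    # A minimal usage sketch (assumes the public "gpt2" checkpoint can be downloaded; guarded so
    # nothing runs when this module is imported by the pipelines framework):
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    print(generator("Hello, I'm a language model,", max_new_tokens=20))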
'''simple docstring'''
def binary_insertion_sort(collection):
    """Sort a mutable collection in ascending order: binary search locates each element's
    position, then the larger elements are shifted right to make room."""
    n = len(collection)
    for i in range(1, n):
        value_to_insert = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if value_to_insert < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value_to_insert
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
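    # Note: binary search cuts the comparisons to O(n log n), but shifting elements keeps the
    # worst case at O(n^2) moves, e.g. binary_insertion_sort([5, 2, 4, 2]) -> [2, 2, 4, 5].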
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any=13 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Optional[int]=16 , UpperCAmelCase_ : Dict=[1, 2, 1] , UpperCAmelCase_ : str=[2, 2, 4] , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : List[Any]=2.0 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Optional[Any]=1e-5 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : List[str]=10 , UpperCAmelCase_ : List[Any]=8 , ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : Dict = image_size
__UpperCAmelCase : int = patch_size
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : int = embed_dim
__UpperCAmelCase : Dict = depths
__UpperCAmelCase : int = num_heads
__UpperCAmelCase : List[str] = window_size
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : List[Any] = qkv_bias
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = drop_path_rate
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : Optional[Any] = use_absolute_embeddings
__UpperCAmelCase : List[str] = patch_norm
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : str = is_training
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : Union[str, Any] = type_sequence_label_size
__UpperCAmelCase : int = encoder_stride
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = SwinvaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(UpperCAmelCase_ )
__UpperCAmelCase : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase : str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Dict = SwinvaForMaskedImageModeling(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : List[str] = SwinvaForMaskedImageModeling(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.type_sequence_label_size
__UpperCAmelCase : Dict = SwinvaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Dict = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Dict = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = SwinvaModelTester(self )
__UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase_ , embed_dim=37 )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
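    # To turn the winning logit into a class name (standard transformers API; this assumes
    # the usual 1000-class ImageNet head shipped with the checkpoint):
    #     predicted_idx = outputs.logits.argmax(-1).item()
    #     print(model.config.id2label[predicted_idx])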
| 37
| 1
|
"""Measure a single qubit on the Aer simulator and report the counts."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
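# A minimal variation (sketch; same qiskit/Aer setup as above): with no gates applied, the
# measurement above is deterministic ("0" for every shot). Adding a Hadamard first puts the
# qubit in superposition, so the counts split roughly 50/50.
def single_qubit_superposition_measure(qubits: int = 1, classical_bits: int = 1) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    circuit.h(0)  # Hadamard: |0> -> (|0> + |1>) / sqrt(2)
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)  # e.g. {'0': ~500, '1': ~500}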
| 168
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)
    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
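    # Worked example (hypothetical numbers) for total_steps: train_batch_size=32,
    # accumulate_grad_batches=2 and gpus=2 give an effective batch size of 128; with
    # dataset_size=12800 and max_epochs=3 that is (12800 / 128) * 3 = 300 scheduler steps.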
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
@pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name", default=None, type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir", default=str(Path(__file__).parent / "test_run" / "cache"), type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop", type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop", type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--attention_dropout", type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
) -> pl.Trainer:
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
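# Typical wiring of this module (sketch; `MyTaskTransformer` is a hypothetical subclass of
# BaseTransformer that implements get_dataloader()):
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     MyTaskTransformer.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     model = MyTaskTransformer(args)
#     trainer = generic_train(model, args)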
| 168
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 367
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
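# Example usage (hypothetical maze; 0 = an open cell, 1 = a blocked cell, matching the
# `not maze[i][j]` check above). A path runs from the top-left to the bottom-right corner:
#     maze = [
#         [0, 1, 0, 0],
#         [0, 0, 0, 1],
#         [1, 0, 1, 0],
#         [1, 0, 0, 0],
#     ]
#     solve_maze(maze)  # prints the 0/1 solution grid and returns True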
| 167
| 0
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
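# In math form (cosine variant): with alpha_bar(t) = cos^2(((t + 0.008) / 1.008) * pi / 2),
# the i-th beta is beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta),
# so the cumulative product of the (1 - beta_i) terms tracks alpha_bar(i / T).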
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )
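        # Worked example (hypothetical values): num_train_timesteps=1000, num_inference_steps=5
        #   "linspace" -> [999.0, 749.25, 499.5, 249.75, 0.0]
        #   "leading"  -> [800., 600., 400., 200., 0.] (plus steps_offset)
        #   "trailing" -> [999., 799., 599., 399., 199.]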
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
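    # Note on the two-phase stepping below: `step` is called twice per output timestep, first
    # at sigma_hat (first-order phase) and then at the interpolated midpoint in log-sigma
    # space; `self.sample is None` is what marks the first-order phase.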
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
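# Minimal usage sketch (assumes the standard diffusers denoising-loop shape; `unet` is a
# hypothetical noise-prediction model):
#     scheduler = KDPM2DiscreteScheduler()
#     scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample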
| 15
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 119
| 0
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 355
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 60
| 0
|
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
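# Note: optax.softmax_cross_entropy(...).mean() is the mean per-token loss, so multiplying by
# the label length and negating recovers the summed log-likelihood of the target sequence,
# which is the quantity the hard-coded EXPECTED_SCORE was computed against.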
| 28
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]

    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
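# Decoding strategy in brief: each head (character, BPE, wordpiece) proposes a string whose
# confidence is the cumulative product of its per-step max probabilities, and batch_decode
# keeps, per sample, the proposal with the highest confidence.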
| 169
| 0
|
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
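# Examples:
#     binary_xor(25, 32)  # -> '0b111001'  (011001 XOR 100000)
#     binary_xor(37, 50)  # -> '0b010111'  (100101 XOR 110010)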
| 365
|
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(__magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> int:
return (pow(__magic_name__ , 2 ) + step) % modulus
for _ in range(__magic_name__ ):
# These track the position within the cycle detection logic.
a__: List[Any] =seed
a__: Optional[int] =seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
a__: List[Any] =rand_fn(__magic_name__ , __magic_name__ , __magic_name__ )
a__: Tuple =rand_fn(__magic_name__ , __magic_name__ , __magic_name__ )
a__: Tuple =rand_fn(__magic_name__ , __magic_name__ , __magic_name__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
a__: Optional[Any] =gcd(hare - tortoise , __magic_name__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
a__: Dict =hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
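
# Minimal usage sketch (illustrative): 8051 = 83 * 97 is the classic textbook
# input; with the default seed and step this implementation finds 97.
# assert pollard_rho(8051) in (83, 97)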
if __name__ == "__main__":
import argparse
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
__UpperCAmelCase = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 42
| 0
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """simple docstring"""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
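
# Hypothetical usage sketch: fetch up to three results into ./query_red_pandas/.
# image_count = download_images_from_google_query("red pandas", max_images=3)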
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print('''Please provide a search term.''')
raise
| 37
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 37
| 1
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    # characters per token: a rough measure of how densely the tokenizer packs the text
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
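
# Illustrative example (hypothetical content): tokenize({"content": "def f(): pass"})
# returns {"input_ids": [...], "ratio_char_token": ...}; a higher ratio means the
# tokenizer packs more characters into each token.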
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 358
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''allenai/led-base-16384''': 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
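
    # Hypothetical usage sketch (not from the original file): the custom `_pad`
    # above keeps `global_attention_mask` aligned with `input_ids`:
    #
    #   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    #   enc = tok(["short", "a much longer document"])  # no padding yet
    #   enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
    #   enc = tok.pad(enc, padding=True)  # the global mask is padded with -1, not 0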
| 38
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Tuple =KandinskyVaaControlnetPipeline
lowercase_ : Dict =['''image_embeds''', '''negative_image_embeds''', '''hint''']
lowercase_ : str =['''image_embeds''', '''negative_image_embeds''', '''hint''']
lowercase_ : Dict =[
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowercase_ : str =False
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return self.time_input_dim
@property
def A__ ( self):
return self.time_input_dim * 4
@property
def A__ ( self):
return 1_0_0
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = {
'''in_channels''': 8,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase = UNetaDConditionModel(**A__)
return model
@property
def A__ ( self):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = VQModel(**self.dummy_movq_kwargs)
return model
def A__ ( self):
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1_0_0_0 ,beta_schedule='''linear''' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=A__ ,set_alpha_to_one=A__ ,steps_offset=1 ,prediction_type='''epsilon''' ,thresholding=A__ ,)
lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A__ ( self ,A__ ,A__=0):
lowercase = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(A__)).to(A__)
lowercase = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
A__)
# create hint
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(A__)).to(A__)
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def A__ ( self):
lowercase = '''cpu'''
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**A__)
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = pipe(**self.get_dummy_inputs(A__))
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(A__) ,return_dict=A__ ,)[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''')
lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''')
lowercase = torch.from_numpy(np.array(A__)).float() / 255.0
lowercase = hint.permute(2 ,0 ,1).unsqueeze(0)
lowercase = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' ,torch_dtype=torch.floataa)
pipe_prior.to(A__)
lowercase = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' ,torch_dtype=torch.floataa)
lowercase = pipeline.to(A__)
pipeline.set_progress_bar_config(disable=A__)
lowercase = '''A robot, 4k photo'''
lowercase = torch.Generator(device='''cuda''').manual_seed(0)
lowercase , lowercase = pipe_prior(
A__ ,generator=A__ ,num_inference_steps=5 ,negative_prompt='''''' ,).to_tuple()
lowercase = torch.Generator(device='''cuda''').manual_seed(0)
lowercase = pipeline(
image_embeds=A__ ,negative_image_embeds=A__ ,hint=A__ ,generator=A__ ,num_inference_steps=1_0_0 ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(A__ ,A__)
| 101
|
"""simple docstring"""
from manim import *
class lowercase ( __UpperCAmelCase):
def a_ ( self : int ):
"""simple docstring"""
A_ : List[str] = Rectangle(height=0.5 , width=0.5 )
A_ : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A_ : Tuple = [mem.copy() for i in range(6 )]
A_ : Optional[int] = [mem.copy() for i in range(6 )]
A_ : Optional[int] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Optional[int] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : List[str] = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Dict = Text('''CPU''' , font_size=24 )
A_ : List[str] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCamelCase )
A_ : Optional[int] = [mem.copy() for i in range(1 )]
A_ : int = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : List[str] = Text('''GPU''' , font_size=24 )
A_ : List[str] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
gpu.align_to(_lowerCamelCase , _lowerCamelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCamelCase )
A_ : List[Any] = [mem.copy() for i in range(6 )]
A_ : List[str] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Any = Text('''Model''' , font_size=24 )
A_ : Optional[int] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCamelCase , run_time=1 ) , Create(_lowerCamelCase , run_time=1 ) , Create(_lowerCamelCase , run_time=1 ) , )
A_ : List[str] = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
A_ : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ : Dict = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase , run_time=2.5 ) , Write(_lowerCamelCase ) , Write(_lowerCamelCase ) )
self.add(_lowerCamelCase )
A_ : str = []
A_ : Any = []
A_ : Tuple = []
for i, rect in enumerate(_lowerCamelCase ):
A_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCamelCase , opacity=0.7 )
cpu_target.move_to(_lowerCamelCase )
cpu_target.generate_target()
A_ : List[str] = 0.46 / 4
A_ : List[Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCamelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_lowerCamelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_lowerCamelCase , buff=0.0 )
cpu_targs.append(_lowerCamelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCamelCase ) )
second_animations.append(MoveToTarget(_lowerCamelCase , run_time=1.5 ) )
self.play(*_lowerCamelCase )
self.play(*_lowerCamelCase )
self.wait()
| 167
| 0
|
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """simple docstring"""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
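
# Examples (illustrative): is_palindrome(121) -> True, is_palindrome(123) -> False,
# is_palindrome(-121) -> False (negative numbers are rejected outright).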
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class __UpperCamelCase ( _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE = WavaVecaPhonemeCTCTokenizer
SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ (self : Tuple):
super().setUp()
A = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" ")
A = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE))))
A = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE) + "\n")
def SCREAMING_SNAKE_CASE__ (self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[Any]=2_0 , __SCREAMING_SNAKE_CASE : Any=5):
A = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE)) for i in range(len(__SCREAMING_SNAKE_CASE))]
A = list(filter(lambda __SCREAMING_SNAKE_CASE: [t[0]] == tokenizer.encode(t[1] , do_phonemize=__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE))
if max_length is not None and len(__SCREAMING_SNAKE_CASE) > max_length:
A = toks[:max_length]
if min_length is not None and len(__SCREAMING_SNAKE_CASE) < min_length and len(__SCREAMING_SNAKE_CASE) > 0:
while len(__SCREAMING_SNAKE_CASE) < min_length:
A = toks + toks
# toks_str = [t[1] for t in toks]
A = [t[0] for t in toks]
# Ensure consistency
A = tokenizer.decode(__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE)
if " " not in output_txt and len(__SCREAMING_SNAKE_CASE) > 1:
A = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE)
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE)
)
if with_prefix_space:
A = " " + output_txt
A = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE)
return output_txt, output_ids
def SCREAMING_SNAKE_CASE__ (self : List[Any] , **__SCREAMING_SNAKE_CASE : Any):
kwargs.update(self.special_tokens_map)
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : Optional[Any]):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
# check adding a single token
tokenizer.add_tokens("xxx")
A = tokenizer("m xxx ɪ" , do_phonemize=__SCREAMING_SNAKE_CASE).input_ids
self.assertEqual(__SCREAMING_SNAKE_CASE , [1_3, 3_9_2, 1_7]) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"])
A = tokenizer("m aaa ɪ ccc" , do_phonemize=__SCREAMING_SNAKE_CASE).input_ids
self.assertEqual(__SCREAMING_SNAKE_CASE , [1_3, 3_9_3, 1_7, 3_9_5]) # aaa and ccc should be after xxx and 2 after aaa
A = tokenizer("maɪ c" , do_phonemize=__SCREAMING_SNAKE_CASE).input_ids
self.assertEqual(__SCREAMING_SNAKE_CASE , [3, 2_0_0]) # mai should be <unk> (=3)
def SCREAMING_SNAKE_CASE__ (self : Tuple):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
self.assertEqual(__SCREAMING_SNAKE_CASE , "h ə l oʊ h aʊ ɑːɹ j uː")
def SCREAMING_SNAKE_CASE__ (self : List[str]):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
self.assertEqual(tokenizer(__SCREAMING_SNAKE_CASE).input_ids , tokenizer(__SCREAMING_SNAKE_CASE , do_phonemize=__SCREAMING_SNAKE_CASE).input_ids)
def SCREAMING_SNAKE_CASE__ (self : Any):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
A = tokenizer.decode(tokenizer(__SCREAMING_SNAKE_CASE).input_ids)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : str):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
A = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
]
A = tokenizer.decode(sample_ids[0])
A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , batch_tokens[0])
self.assertEqual(__SCREAMING_SNAKE_CASE , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
def SCREAMING_SNAKE_CASE__ (self : List[str]):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
self.assertEqual(__SCREAMING_SNAKE_CASE , "h ə l oʊ | h aʊ | ɑːɹ | j uː |")
def SCREAMING_SNAKE_CASE__ (self : str):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
self.assertEqual(tokenizer(__SCREAMING_SNAKE_CASE).input_ids , tokenizer(__SCREAMING_SNAKE_CASE , do_phonemize=__SCREAMING_SNAKE_CASE).input_ids)
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
# fmt: off
A = [
[1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
[tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
]
# fmt: on
# decode with word_del_token filter
A = tokenizer.decode(sample_ids[0])
A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , batch_tokens[0])
self.assertEqual(__SCREAMING_SNAKE_CASE , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
# decode with no word_del_token filter
A = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE)
A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , batch_tokens[0])
self.assertEqual(__SCREAMING_SNAKE_CASE , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
def SCREAMING_SNAKE_CASE__ (self : Dict):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
A = tokenizer.decode(tokenizer(__SCREAMING_SNAKE_CASE).input_ids , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : List[Any]):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
A = tokenizer.decode(tokenizer(__SCREAMING_SNAKE_CASE).input_ids , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE)
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip() , __SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : Dict):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=__SCREAMING_SNAKE_CASE)
A = "Hello how are you"
A = tokenizer(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us").input_ids
A = tokenizer(__SCREAMING_SNAKE_CASE , phonemizer_lang="fr-fr").input_ids
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
A = tokenizer.decode(__SCREAMING_SNAKE_CASE)
A = tokenizer.decode(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , "h ə l oʊ h aʊ ɑːɹ j uː")
self.assertEqual(__SCREAMING_SNAKE_CASE , "ɛ l o h aʊ a ʁ j u")
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any]):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
A = "Hello how Are you"
A = "hello how are you"
A = tokenizer(__SCREAMING_SNAKE_CASE).input_ids
A = tokenizer(__SCREAMING_SNAKE_CASE).input_ids
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any]):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
tokenizer.add_tokens(["!", "?"])
tokenizer.add_special_tokens({"cls_token": "$$$"})
# fmt: off
A = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4],
]
# fmt: on
A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
@staticmethod
def SCREAMING_SNAKE_CASE__ (__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]):
A = [d[key] for d in offsets]
return retrieved_list
def SCREAMING_SNAKE_CASE__ (self : Optional[Any]):
A = self.get_tokenizer(word_delimiter_token="|")
tokenizer.add_tokens("|")
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
A = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8]
# fmt: on
A = tokenizer.decode(__SCREAMING_SNAKE_CASE , output_char_offsets=__SCREAMING_SNAKE_CASE , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE)
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys()) , 2)
self.assertTrue("text" in outputs)
self.assertTrue("char_offsets" in outputs)
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char")) , outputs.text)
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char") , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"])
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset") , [0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6])
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset") , [1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7])
def SCREAMING_SNAKE_CASE__ (self : Any):
A = self.get_tokenizer(word_delimiter_token="|")
def check_list_tuples_equal(__SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any]):
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
self.assertTrue(isinstance(outputs_list[0] , __SCREAMING_SNAKE_CASE))
# transform list to ModelOutput
A = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]})
self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"])
def recursive_check(__SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any]):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
[recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for la, la in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"])
# fmt: off
A = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4],
[2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , output_char_offsets=__SCREAMING_SNAKE_CASE)
A = [tokenizer.decode(__SCREAMING_SNAKE_CASE , output_char_offsets=__SCREAMING_SNAKE_CASE) for ids in sample_ids]
check_list_tuples_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
def SCREAMING_SNAKE_CASE__ (self : Dict):
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
def SCREAMING_SNAKE_CASE__ (self : str):
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
pass
def SCREAMING_SNAKE_CASE__ (self : List[str]):
A = self.get_tokenizers(do_lower_case=__SCREAMING_SNAKE_CASE)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
A = tokenizer.vocab_size
A = len(__SCREAMING_SNAKE_CASE)
self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A = ["aaaaa bbbbbb", "cccccccccdddddddd"]
A = tokenizer.add_tokens(__SCREAMING_SNAKE_CASE)
A = tokenizer.vocab_size
A = len(__SCREAMING_SNAKE_CASE)
self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE))
self.assertEqual(__SCREAMING_SNAKE_CASE , all_size + len(__SCREAMING_SNAKE_CASE))
A = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__SCREAMING_SNAKE_CASE)
self.assertGreaterEqual(len(__SCREAMING_SNAKE_CASE) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
A = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
A = tokenizer.add_special_tokens(__SCREAMING_SNAKE_CASE)
A = tokenizer.vocab_size
A = len(__SCREAMING_SNAKE_CASE)
self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE))
self.assertEqual(__SCREAMING_SNAKE_CASE , all_size_a + len(__SCREAMING_SNAKE_CASE))
A = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__SCREAMING_SNAKE_CASE)
self.assertGreaterEqual(len(__SCREAMING_SNAKE_CASE) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def SCREAMING_SNAKE_CASE__ (self : List[str]):
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def SCREAMING_SNAKE_CASE__ (self : List[Any]):
pass
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
A = self.get_tokenizers(fast=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
A = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
A = tokenizer.convert_tokens_to_string(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(output["text"] , __SCREAMING_SNAKE_CASE)
| 57
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 155
|
"""simple docstring"""
def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
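
# Worked example (illustrative): for 152 the candidates after removing one digit
# are 52, 12 and 15, so remove_digit(152) == 52.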
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 60
| 0
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    '''simple docstring'''

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    '''simple docstring'''

    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 368
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
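
# What these tests pin down (note, not from the original file): a second FileLock
# on the same path cannot be acquired while the first is held, so acquire() with a
# small timeout raises Timeout instead of blocking; and overly long lock-file names
# are shortened so the basename never exceeds 255 characters.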
| 36
| 0
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]:
'''simple docstring'''
A__ = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''')
A__ = BertTokenizer.from_pretrained('''bert-base-uncased''')
A__ = bertabert.config.encoder.vocab_size
A__ = tokenizer.sep_token_id
A__ = tokenizer.cls_token_id
A__ = 128
A__ = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''')
A__ = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''')
A__ = train_dataset.select(range(32))
A__ = val_dataset.select(range(16))
A__ = 4
def _map_to_encoder_decoder_inputs(UpperCAmelCase__ : List[str]):
# Tokenizer will automatically set [BOS] <text> [EOS]
A__ = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=UpperCAmelCase__ , max_length=512)
A__ = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=UpperCAmelCase__ , max_length=128)
A__ = inputs.input_ids
A__ = inputs.attention_mask
A__ = outputs.input_ids
A__ = outputs.input_ids.copy()
A__ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
A__ = outputs.attention_mask
assert all(len(UpperCAmelCase__) == 512 for x in inputs.input_ids)
assert all(len(UpperCAmelCase__) == 128 for x in outputs.input_ids)
return batch
def _compute_metrics(UpperCAmelCase__ : Tuple):
A__ = pred.label_ids
A__ = pred.predictions
# all unnecessary tokens are removed
A__ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__)
A__ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__)
A__ = sum([int(pred_str[i] == label_str[i]) for i in range(len(UpperCAmelCase__))]) / len(UpperCAmelCase__)
return {"accuracy": accuracy}
# map train dataset
A__ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
A__ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
A__ = self.get_auto_remove_tmp_dir()
A__ = SeqaSeqTrainingArguments(
output_dir=UpperCAmelCase__ , per_device_train_batch_size=UpperCAmelCase__ , per_device_eval_batch_size=UpperCAmelCase__ , predict_with_generate=UpperCAmelCase__ , evaluation_strategy='''steps''' , do_train=UpperCAmelCase__ , do_eval=UpperCAmelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
A__ = SeqaSeqTrainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , )
# start training
trainer.train()
| 14
|
'''simple docstring'''
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
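
# Context note (not from the original script): channels-last memory format and
# bfloat16 autocast are the two CPU-side optimizations exercised here; passing a
# sample_input lets ipex.optimize() additionally trace and fuse the UNet graph.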
| 42
| 0
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        '''simple docstring'''
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
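
# NOTE (editor): hand-rolled sketch of what compute_bleu does for a single
# prediction, added to make the score's ingredients concrete; it is not part of
# the metric, and the helper names are illustrative.
import math
from collections import Counter


def ngrams(tokens, n):
    return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))


def simple_bleu(prediction, reference, max_order=4):
    # Geometric mean of clipped n-gram precisions ...
    precisions = []
    for n in range(1, max_order + 1):
        pred_counts = ngrams(prediction, n)
        ref_counts = ngrams(reference, n)
        overlap = sum((pred_counts & ref_counts).values())  # & clips to min counts
        total = max(sum(pred_counts.values()), 1)
        precisions.append(overlap / total)
    if min(precisions) == 0:
        return 0.0
    geo_mean = math.exp(sum(math.log(p) for p in precisions) / max_order)
    # ... times a brevity penalty for candidates shorter than the reference.
    bp = 1.0 if len(prediction) > len(reference) else math.exp(1 - len(reference) / len(prediction))
    return bp * geo_mean


assert simple_bleu(["hello", "there", "general", "kenobi"],
                   ["hello", "there", "general", "kenobi"]) == 1.0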
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Next greater element for each entry, via a quadratic double loop."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same quadratic idea, but iterating over a slice instead of indices."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Linear-time solution: scan from the right, keeping a stack whose top is
    always the nearest element to the right that is still a candidate."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            # pop everything that is not greater than the current element
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
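
# NOTE (editor): tiny trace of the monotonic stack on [2, 1, 3], scanning from
# the right: 3 enters an empty stack (-> -1), 1 sees 3 on top (-> 3), then 2
# pops 1 and also sees 3 (-> 3).
assert next_greatest_element([2, 1, 3]) == [3, 3, -1]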
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
'''simple docstring'''
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Union[str, Any] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = Accelerator(cpu=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Optional[int] = Accelerator()
UpperCAmelCase_ : Dict = GradientState()
assert state.num_steps == 1
UpperCAmelCase_ : Any = 4
assert state.num_steps == 4
assert state.sync_gradients is True
UpperCAmelCase_ : Tuple = False
assert state.sync_gradients is False
GradientState._reset_state()
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Union[str, Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = create_components()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Any = accelerator.prepare(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : List[str] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = create_components()
accelerator.prepare(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __UpperCAmelCase ( self ) -> Tuple:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*_UpperCamelCase , **_UpperCamelCase ):
pass
with patch('torch.cuda.set_device' , _UpperCamelCase ), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64' ):
UpperCAmelCase_ : Any = Accelerator()
self.assertEqual(str(accelerator.state.device ) , 'cuda:64' )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = create_components()
accelerator.prepare(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = get_signature(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_UpperCamelCase )
# make sure random weights don't match
load_random_weights(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) < 1E-3 )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = create_components()
accelerator.prepare(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = get_signature(_UpperCamelCase )
# saving hook
def save_config(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = {'class_name': models[0].__class__.__name__}
with open(os.path.join(_UpperCamelCase , 'data.json' ) , 'w' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
# loading hook
def load_config(_UpperCamelCase , _UpperCamelCase ):
with open(os.path.join(_UpperCamelCase , 'data.json' ) , 'r' ) as f:
UpperCAmelCase_ : Optional[Any] = json.load(_UpperCamelCase )
UpperCAmelCase_ : str = config['class_name']
UpperCAmelCase_ : Union[str, Any] = accelerator.register_save_state_pre_hook(_UpperCamelCase )
UpperCAmelCase_ : Any = accelerator.register_load_state_pre_hook(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_UpperCamelCase )
# make sure random weights don't match with hooks
load_random_weights(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_ : List[str] = 'random'
# make sure loaded weights match with hooks
accelerator.load_state(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) < 1E-3 )
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_UpperCamelCase )
# make sure random weights don't match with hooks removed
load_random_weights(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_ : Union[str, Any] = 'random'
# make sure loaded weights match with hooks removed
accelerator.load_state(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) < 1E-3 )
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Optional[int] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = create_components()
UpperCAmelCase_ : Tuple = None
# This should work
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertTrue(dummy_obj is None )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : List[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = create_components()
UpperCAmelCase_ : List[Any] = [1, 2, 3]
# This should work
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Dummy object should have `_is_accelerate_prepared` set to `True`' , )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Model is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Optimizer is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Scheduler is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
@slow
@require_bnb
def __UpperCAmelCase ( self ) -> Dict:
from transformers import AutoModelForCausalLM
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_8bit=_UpperCamelCase , device_map={'': 0} , )
UpperCAmelCase_ : List[str] = Accelerator()
# This should work
UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(_UpperCamelCase )
@slow
@require_bnb
def __UpperCAmelCase ( self ) -> Any:
from transformers import AutoModelForCausalLM
UpperCAmelCase_ : Optional[int] = Accelerator()
with init_empty_weights():
UpperCAmelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
UpperCAmelCase_ : Any = infer_auto_device_map(_UpperCamelCase )
UpperCAmelCase_ : int = 'cpu'
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , device_map=_UpperCamelCase , load_in_8bit=_UpperCamelCase , llm_int8_enable_fp32_cpu_offload=_UpperCamelCase )
# This should not work and get value error
with self.assertRaises(_UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = accelerator.prepare(_UpperCamelCase )
@slow
@require_bnb
@require_multi_gpu
def __UpperCAmelCase ( self ) -> Optional[Any]:
from transformers import AutoModelForCausalLM
UpperCAmelCase_ : List[Any] = {'distributed_type': DistributedType.MULTI_GPU}
with init_empty_weights():
UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
UpperCAmelCase_ : Optional[Any] = infer_auto_device_map(_UpperCamelCase )
UpperCAmelCase_ : Any = 1
UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_8bit=_UpperCamelCase , device_map=_UpperCamelCase , )
UpperCAmelCase_ : str = Accelerator()
# This should not work and get value error
with self.assertRaises(_UpperCamelCase ):
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __UpperCAmelCase ( self ) -> Tuple:
from transformers import AutoModelForCausalLM
with init_empty_weights():
UpperCAmelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
UpperCAmelCase_ : Tuple = infer_auto_device_map(_UpperCamelCase )
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_8bit=_UpperCamelCase , device_map=_UpperCamelCase , )
UpperCAmelCase_ : int = Accelerator()
# This should work
UpperCAmelCase_ : Any = accelerator.prepare(_UpperCamelCase )
@require_cuda
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = torch.nn.Linear(1_0 , 1_0 )
UpperCAmelCase_ : Dict = torch.optim.SGD(model.parameters() , lr=0.01 )
UpperCAmelCase_ : Any = Accelerator(cpu=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(_UpperCamelCase )
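
# NOTE (editor): minimal sketch of the Accelerator.prepare() flow the tests
# above exercise; it runs on CPU and assumes only that `accelerate` is
# installed. The toy model/data are illustrative.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
train_dl = DataLoader(TensorDataset(torch.randn(8, 2), torch.randn(8, 4)), batch_size=4)

# prepare() wraps each object for the current device/distributed setup and
# registers it on the accelerator (cf. _models/_optimizers/_dataloaders above).
model, optimizer, train_dl = accelerator.prepare(model, optimizer, train_dl)
for x, y in train_dl:
    loss = torch.nn.functional.mse_loss(model(x), y)
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()
    optimizer.zero_grad()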
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
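
# NOTE (editor): hedged usage sketch for the pipeline exported above; the
# checkpoint id "openai/shap-e" and the call signature follow the public
# diffusers docs and should be treated as assumptions, not part of this module.
import torch
from diffusers import ShapEPipeline

pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
images = pipe(
    "a shark",
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=256,
).images  # rendered views of the generated 3D asset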
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
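
# NOTE (editor): quick sketch of what the config above encodes, using the
# public transformers API (ViTConfig / ViTModel); illustrative only.
import torch
from transformers import ViTConfig, ViTModel

config = ViTConfig(hidden_size=768, num_hidden_layers=12, image_size=224, patch_size=16)
model = ViTModel(config)
# 224/16 = 14 patches per side -> 14*14 + 1 [CLS] = 197 sequence positions
out = model(torch.randn(1, 3, 224, 224))
print(out.last_hidden_state.shape)  # torch.Size([1, 197, 768])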
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")
        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Tuple = self.get_feature_extractor()
__snake_case : Dict = self.get_decoder()
__snake_case : List[str] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : Tuple = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case__ ( self : int ):
__snake_case : Tuple = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case__ ( self : Dict ):
__snake_case : int = self.get_feature_extractor()
__snake_case : str = self.get_tokenizer()
__snake_case : Dict = self.get_decoder()
__snake_case : Any = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : List[Any] = floats_list((3, 10_00) )
__snake_case : Optional[Any] = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Tuple = processor(_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = """This is a test string"""
__snake_case : Union[str, Any] = processor(text=_lowerCAmelCase )
__snake_case : Dict = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def snake_case__ ( self : Tuple ):
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : List[str] = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case : int = processor.decode(_lowerCAmelCase )
__snake_case : Optional[int] = decoder.decode_beams(_lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[str] ):
__snake_case : int = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case : Tuple = processor.batch_decode(_lowerCAmelCase )
else:
with get_context(_lowerCAmelCase ).Pool() as pool:
__snake_case : int = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : int = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as p:
__snake_case : Tuple = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase )
__snake_case , __snake_case , __snake_case : List[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score )
def snake_case__ ( self : Optional[int] ):
__snake_case : Optional[Any] = self.get_feature_extractor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
__snake_case : List[str] = 15
__snake_case : Optional[Any] = -20.0
__snake_case : Tuple = -4.0
__snake_case : List[Any] = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : List[str] = decoded_processor_out.text
__snake_case : str = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Dict = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][2] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1e-3 ) )
def snake_case__ ( self : Any ):
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Any = self._get_dummy_logits()
__snake_case : Any = 2.0
__snake_case : int = 5.0
__snake_case : Optional[int] = -20.0
__snake_case : Optional[int] = True
__snake_case : Any = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
__snake_case : str = decoded_processor_out.text
__snake_case : int = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Tuple = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
__snake_case : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : Union[str, Any] = os.listdir(_lowerCAmelCase )
__snake_case : List[str] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : List[str] = os.listdir(_lowerCAmelCase )
__snake_case : List[Any] = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both the decoder from the hub and the local files in cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = floats_list((3, 10_00) )
__snake_case : Union[str, Any] = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Union[str, Any] = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case : Dict = self._get_dummy_logits()
__snake_case : List[Any] = processor_wavaveca.batch_decode(_lowerCAmelCase )
__snake_case : List[Any] = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case__ ( self : str ):
__snake_case : int = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def snake_case__ ( self : Dict ):
__snake_case : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : List[str] = self._get_dummy_logits()[0]
__snake_case : str = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def snake_case__ ( self : List[str] ):
__snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = self._get_dummy_logits()
__snake_case : int = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case__ ( self : Optional[Any] ):
import torch
__snake_case : Optional[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
__snake_case : Any = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
__snake_case : List[Any] = iter(_lowerCAmelCase )
__snake_case : Optional[int] = next(_lowerCAmelCase )
__snake_case : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__snake_case : Dict = model(_lowerCAmelCase ).logits.cpu().numpy()
__snake_case : Any = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
__snake_case : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case : Dict = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__snake_case : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
__snake_case : Dict = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
__snake_case : Optional[Any] = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
__snake_case : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__snake_case : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
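
# NOTE (editor): compact sketch of the offset->seconds conversion used in the
# last test above; `inputs_to_logits_ratio` and `sampling_rate` come from the
# model/processor, the offsets from decode(..., output_word_offsets=True).
def word_offsets_to_times(word_offsets, inputs_to_logits_ratio, sampling_rate):
    time_offset = inputs_to_logits_ratio / sampling_rate  # seconds per logit frame
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
        }
        for d in word_offsets
    ]


# e.g. with a wav2vec2-base setup: ratio=320, sampling_rate=16000 -> 20 ms per frame
print(word_offsets_to_times([{"word": "hi", "start_offset": 10, "end_offset": 14}], 320, 16000))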
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
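
# NOTE (editor): the module above follows transformers' lazy-import pattern; a
# stripped-down sketch of the same idea, independent of transformers:
import importlib
import types


class LazyModule(types.ModuleType):
    """Defer a submodule import until an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(attr)
        submodule = self._attr_to_submodule[attr]
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)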
"""simple docstring"""
def solution() -> int:
    """Product of the digits d1, d10, d100, ..., d1000000 of Champernowne's
    constant 0.123456789101112... (Project Euler problem 40)."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
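
# NOTE (editor): sanity check for the indexing above -- Champernowne's constant
# "0.123456789101112..." has '1' as both its 1st and 10th digit, so
# constant[0] == "1" and constant[9] == "1".
digits = "".join(str(i) for i in range(1, 200))
assert digits[0] == "1" and digits[9] == "1"  # d1 = 1, d10 = 1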
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    """Wraps an MCTCT feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
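
# NOTE (editor): hedged sketch of how such a processor is typically called.
# The checkpoint id "speechbrain/m-ctc-t-large" is the MCTCT checkpoint on the
# hub, and the importability of MCTCTProcessor from recent transformers is an
# assumption; treat both as illustrative.
import numpy as np
from transformers import MCTCTProcessor

processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
audio = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
# audio and labels in one call, via the `text` kwarg the deprecation notice points to
inputs = processor(audio=audio, sampling_rate=16000, text="hello world", return_tensors="pt")
print(inputs.keys())  # input features plus "labels" from the tokenizer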
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
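
# NOTE (editor): the reproducibility trick used in both tests -- reseeding the
# generator before each call -- shown in isolation:
import torch

g = torch.manual_seed(0)
a = torch.randn(3, generator=g)
g = torch.manual_seed(0)
b = torch.randn(3, generator=g)
assert torch.equal(a, b)  # identical draws, hence comparable pipeline outputs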