| code (string) | code_codestyle (int 0-371) | style_context (string) | style_context_codestyle (int 0-349) | label (int 0-1) |
|---|---|---|---|---|
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
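The round trip these tests exercise also works outside the test suite; a minimal sketch (the checkpoint name and local paths are illustrative):

# Minimal PT <-> TF round-trip sketch; "bert-base-uncased" and the paths are examples.
from transformers import AutoModel, TFAutoModel

pt_model = AutoModel.from_pretrained("bert-base-uncased")
pt_model.save_pretrained("./bert-pt")  # writes pytorch_model.bin + config.json

tf_model = TFAutoModel.from_pretrained("./bert-pt", from_pt=True)  # PT weights -> TF model
tf_model.save_pretrained("./bert-tf")

pt_again = AutoModel.from_pretrained("./bert-tf", from_tf=True)  # TF weights -> PT model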
| 99 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
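A hedged usage sketch of the helpers above, mirroring how model-parallel training examples typically apply them to a Flax model (the GPT-Neo checkpoint is illustrative):

from flax.core.frozen_dict import unfreeze
from transformers import FlaxGPTNeoForCausalLM

model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
# One PartitionSpec (or None = replicated) per parameter leaf, suitable for pjit-style sharding.
param_spec = set_partitions(unfreeze(model.params))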
| 11 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
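A short sketch of composing the three sub-configs (the values shown are the defaults; OPT is one valid text backbone):

from transformers import OPTConfig

config = Blip2Config.from_vision_qformer_text_configs(
    vision_config=Blip2VisionConfig(),
    qformer_config=Blip2QFormerConfig(),
    text_config=OPTConfig(),
    num_query_tokens=32,
)
print(config.use_decoder_only_language_model)  # True: OPT is a causal-LM backbone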
| 362 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
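A quick sketch of what the derived attributes evaluate to for the defaults above:

config = SEWDConfig()
print(config.num_feat_extract_layers)  # 13 convolutional feature-extractor layers
print(config.inputs_to_logits_ratio)   # product of the conv strides -> 320 input samples per output frame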
| 348 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 35 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 102 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
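A hypothetical Python-level invocation of the converter above (every path below is a placeholder for local fairseq artifacts):

convert_wav2vec2_checkpoint(
    "checkpoint_best.pt",        # fairseq checkpoint (placeholder)
    "./s2t-wav2vec2-converted",  # output folder (placeholder)
    "dict.ltr.txt",              # dict of the fine-tuned model (placeholder)
    encoder_config_path="facebook/wav2vec2-large-lv60",
    decoder_config_path="facebook/s2t-small-mustc-en-fr-st",
    vocab_size=10224,
    num_decoder_layers=7,
)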
| 355 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
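A minimal usage sketch for HfArgumentParser; the dataclass and flag values are illustrative:

from dataclasses import dataclass, field

@dataclass
class TrainingArgs:
    learning_rate: float = field(default=5e-5, metadata={"help": "Peak learning rate."})
    do_train: bool = False
    seed: int = 42

parser = HfArgumentParser(TrainingArgs)
# `--do_train` with no value picks the `const` (True); typed flags are converted automatically.
(training_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "3e-5", "--do_train"])
print(training_args.learning_rate, training_args.do_train)  # 3e-05 True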
| 74 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    """Determine whether the given string is a valid Indian mobile number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
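Two quick sanity checks (the numbers are illustrative):

assert indian_phone_validator("+91 9876543210")
assert not indian_phone_validator("+911234567890")  # first subscriber digit must be 7, 8 or 9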
| 153 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370 |
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    """Iterative digit sum."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 77 | 0 |
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Encode a lowercase string as a list of numbers 1-26."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of numbers 1-26 back to a lowercase string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
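Round-trip example: lowercase letters a-z map to 1-26 and back.

assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"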
| 51 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : int = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLM-RoBERTa model."""

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
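# Illustrative usage (not part of the original file): build a small config and
# check a couple of fields; all keyword arguments are from the signature above.
def _demo_xlm_roberta_config():
    config = XLMRobertaConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    assert config.model_type == "xlm-roberta"
    assert config.hidden_size == 64
    return config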
| 274 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches the output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
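# Illustrative usage (not part of the original file).
def _demo_strtobool():
    assert strtobool("YES") == 1
    assert strtobool("off") == 0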
def is_tensor(x):
    """Test whether ``x`` is a torch, TensorFlow or Jax tensor, or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Test whether ``x`` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Test whether ``x`` is a torch tensor. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Test whether ``x`` is a torch device. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Test whether ``x`` is a torch dtype. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Test whether ``x`` is a TensorFlow tensor. Safe to call even if TensorFlow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Test whether ``x`` is a TensorFlow symbolic tensor. Safe to call even if TensorFlow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Test whether ``x`` is a Jax tensor. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
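# Illustrative usage (not part of the original file): nested containers are
# converted recursively; framework tensors are handled only if installed.
def _demo_to_py_obj():
    assert to_py_obj({"a": np.array([1, 2]), "b": [np.float32(0.5)]}) == {"a": [1, 2], "b": [0.5]}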
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a ``__getitem__`` that allows indexing by integer or slice
    (like a tuple) or strings (like a dictionary) that will ignore the ``None`` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not ``None``."""
        return tuple(self[k] for k in self.keys())
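# Illustrative usage (not part of the original file): a ModelOutput subclass
# is declared as a dataclass; fields left as None are skipped by the mapping.
def _demo_model_output():
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class DemoOutput(ModelOutput):
        logits: Optional[np.ndarray] = None
        loss: Optional[np.ndarray] = None

    out = DemoOutput(logits=np.ones(2))
    assert list(out.keys()) == ["logits"]  # the None ``loss`` field is skipped
    assert out[0] is out["logits"]  # integer indexing works like a tuple
    return out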
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the ``padding`` argument of tokenizers."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the ``return_tensors`` argument of tokenizers and processors."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper around ``contextlib.ExitStack`` that enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
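# Illustrative usage (not part of the original file): enter several context
# managers through a single ``with`` block; the stack unwinds in reverse order.
def _demo_context_managers():
    import contextlib

    @contextlib.contextmanager
    def tag(name, log):
        log.append(f"+{name}")
        yield
        log.append(f"-{name}")

    log = []
    with ContextManagers([tag("a", log), tag("b", log)]):
        log.append("body")
    assert log == ["+a", "+b", "body", "-b", "-a"]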
def can_return_loss(model_class):
    """Check if a given model class can return loss, based on its call/forward signature."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
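# Illustrative usage (not part of the original file).
def _demo_flatten_dict():
    nested = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
    assert flatten_dict(nested) == {"a": 1, "b.c": 2, "b.d.e": 3}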
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """Framework-agnostic version of ``numpy.transpose`` for numpy/torch/TensorFlow/Jax arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of ``numpy.reshape`` for numpy/torch/TensorFlow/Jax arrays."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of ``numpy.squeeze`` for numpy/torch/TensorFlow/Jax arrays."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of ``numpy.expand_dims`` for numpy/torch/TensorFlow/Jax arrays."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of ``numpy.size`` for numpy/torch/TensorFlow/Jax arrays."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
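# Illustrative usage (not part of the original file): the framework-agnostic
# array helpers dispatch on the input type; NumPy is used here since it is the
# only hard dependency of this module.
def _demo_array_helpers():
    x = np.arange(6)
    assert reshape(x, (2, 3)).shape == (2, 3)
    assert transpose(reshape(x, (2, 3))).shape == (3, 2)
    assert squeeze(np.ones((1, 4))).shape == (4,)
    assert expand_dims(x, 0).shape == (1, 6)
    assert tensor_size(x) == 6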
| 35 |
import datasets
from .evaluate import evaluate
lowercase = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
lowercase = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
lowercase = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    """CUAD metric, wrapping the official CUAD v1 scoring script."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 35 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 86 |
from typing import List
from .keymap import KEYMAP, get_character
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : str = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += [key]
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
def __lowercase ( *_UpperCamelCase ) ->Any:
"""simple docstring"""
def decorator(_UpperCamelCase ):
lowercase : List[Any] = getattr(_UpperCamelCase, '''handle_key''', [] )
handle += keys
setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase )
return func
return decorator
class __SCREAMING_SNAKE_CASE ( A__ ):
def __new__( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not hasattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' ):
setattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' , {} )
setattr(SCREAMING_SNAKE_CASE__ , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , '''handle_key''' , [] )
for key in handled_keys:
lowercase : List[Any] = value
return new_cls
@staticmethod
def __lowerCamelCase ( cls ):
lowercase : Dict = get_character()
if char != KEYMAP["undefined"]:
lowercase : Optional[int] = ord(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = cls.key_handler.get(SCREAMING_SNAKE_CASE__ )
if handler:
lowercase : Tuple = char
return handler(cls )
else:
return None
def __lowercase ( cls ) ->Any:
"""simple docstring"""
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
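# Illustrative usage (not part of the original file): ``register`` applies the
# KeyHandler metaclass, and ``mark`` registers a method under a key. The
# literal key used here is arbitrary, purely to show the wiring.
def _demo_key_handler():
    @register
    class _Menu:
        @mark("x")
        def select(cls):
            return "selected"

    assert "x" in _Menu.key_handler
    return _Menu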
| 337 | 0 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase_ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowercase_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowercase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowercase_ = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
lowercase_ = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
'''simple docstring'''
A__ = None
# source code of `config_class`
A__ = inspect.getsource(SCREAMING_SNAKE_CASE__ )
A__ = _re_checkpoint.findall(SCREAMING_SNAKE_CASE__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
A__ = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
A__ = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
A__ = ckpt_name
break
return checkpoint
def _snake_case( ) -> Any:
'''simple docstring'''
A__ = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
A__ = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE__ )
A__ = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
A__ = '\n'.join(sorted(SCREAMING_SNAKE_CASE__ ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 282 |
import random
def _snake_case( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : str ) -> tuple:
'''simple docstring'''
A__ , A__ , A__ = [], [], []
for element in data:
if element < pivot:
less.append(SCREAMING_SNAKE_CASE__ )
elif element > pivot:
greater.append(SCREAMING_SNAKE_CASE__ )
else:
equal.append(SCREAMING_SNAKE_CASE__ )
return less, equal, greater
def _snake_case( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int ) -> str:
'''simple docstring'''
if index >= len(SCREAMING_SNAKE_CASE__ ) or index < 0:
return None
A__ = items[random.randint(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )]
A__ = 0
A__ , A__ , A__ = _partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = len(SCREAMING_SNAKE_CASE__ )
A__ = len(SCREAMING_SNAKE_CASE__ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# must be in larger
else:
return quick_select(SCREAMING_SNAKE_CASE__ , index - (m + count) )
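# Illustrative usage (not part of the original file): pick order statistics,
# e.g. the median of an odd-length list.
def _demo_quick_select():
    data = [9, 1, 7, 3, 5]
    assert quick_select(data, 0) == 1
    assert quick_select(data, len(data) // 2) == 5  # median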
| 282 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), config and model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 25 |
def mf_knapsack(i, wt, val, j):
    """
    This code involves the concept of memory functions: only the subproblems
    that are actually needed get solved, unlike the bottom-up example below.
    ``f`` is a 2D array filled with -1s.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """
    Solves the integer weights knapsack problem and returns one of
    the several possible optimal subsets.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Recursively reconstructs one of the optimal subsets from a filled DP table."""
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 195 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 364 |
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Ascii85 bytes via ``base64.a85encode``."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85-encoded bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
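# Illustrative round trip (not part of the original file).
def _demo_base85():
    assert base85_decode(base85_encode("Hello World!")) == "Hello World!"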
| 230 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)

        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 176 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample

            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
def A_ ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=None ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = [targets]
try:
SCREAMING_SNAKE_CASE__ = self.tokenizer.get_vocab()
except Exception:
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = []
for target in targets:
SCREAMING_SNAKE_CASE__ = vocab.get(UpperCAmelCase_ , UpperCAmelCase_ )
if id_ is None:
SCREAMING_SNAKE_CASE__ = self.tokenizer(
UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , max_length=1 , truncation=UpperCAmelCase_ , )['input_ids']
if len(UpperCAmelCase_ ) == 0:
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
'We cannot replace it with anything meaningful, ignoring it' )
continue
SCREAMING_SNAKE_CASE__ = input_ids[0]
                # XXX: If users hit this path, lookup becomes pretty slow, so the
                # warning lets them fix their input and get faster performance.
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
F'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
SCREAMING_SNAKE_CASE__ = list(set(UpperCAmelCase_ ) )
if len(UpperCAmelCase_ ) == 0:
raise ValueError('At least one target must be provided when passed.' )
SCREAMING_SNAKE_CASE__ = np.array(UpperCAmelCase_ )
return target_ids
def A_ ( self : List[str] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None ):
SCREAMING_SNAKE_CASE__ = {}
if targets is not None:
SCREAMING_SNAKE_CASE__ = self.get_target_ids(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = target_ids
if top_k is not None:
SCREAMING_SNAKE_CASE__ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
def __call__( self : Tuple , UpperCAmelCase_ : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE__ = super().__call__(UpperCAmelCase_ , **UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(UpperCAmelCase_ ) == 1:
return outputs[0]
return outputs
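# A minimal usage sketch (hedged): `pipeline` and the "fill-mask" task are the
# standard transformers entry points; the checkpoint name below is only an
# illustrative fill-mask model, not something this module prescribes.
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")
    # top_k caps how many candidates come back per mask
    print(unmasker("Paris is the <mask> of France.", top_k=3))
    # targets restricts scoring to the given tokens; tokens absent from the
    # vocab fall back to their first sub-token, with the warning emitted above
    print(unmasker("Paris is the <mask> of France.", targets=["capital"]))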
| 176 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Tuple = logging.get_logger(__name__)
def _UpperCamelCase ( UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any]=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def _UpperCamelCase ( UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=False ) -> Union[str, Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase__ = ''
else:
lowerCAmelCase__ = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
lowerCAmelCase__ = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ = in_proj_bias[-config.hidden_size :]
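# A self-contained sketch of the slicing above (sizes are illustrative and the
# helper name is hypothetical): timm stores the attention projections as one
# fused (3 * hidden, hidden) matrix, which is split into equal q/k/v thirds.
def _demo_split_qkv(hidden: int = 4) -> None:
    import numpy as np

    qkv = np.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
    q = qkv[:hidden, :]
    k = qkv[hidden : 2 * hidden, :]
    v = qkv[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)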
def _UpperCamelCase ( UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(UpperCamelCase_ , UpperCamelCase_ )
def _UpperCamelCase ( UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : int ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = dct.pop(UpperCamelCase_ )
lowerCAmelCase__ = val
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase_ : int , UpperCamelCase_ : Any ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = ViTConfig()
lowerCAmelCase__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCAmelCase__ = True
lowerCAmelCase__ = int(vit_name[-12:-10] )
lowerCAmelCase__ = int(vit_name[-9:-6] )
else:
lowerCAmelCase__ = 1000
lowerCAmelCase__ = 'huggingface/label-files'
lowerCAmelCase__ = 'imagenet-1k-id2label.json'
lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = int(vit_name[-6:-4] )
lowerCAmelCase__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
lowerCAmelCase__ = 192
lowerCAmelCase__ = 768
lowerCAmelCase__ = 12
lowerCAmelCase__ = 3
elif vit_name[9:].startswith('small' ):
lowerCAmelCase__ = 384
lowerCAmelCase__ = 1536
lowerCAmelCase__ = 12
lowerCAmelCase__ = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
lowerCAmelCase__ = 768
lowerCAmelCase__ = 2304
lowerCAmelCase__ = 8
lowerCAmelCase__ = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
lowerCAmelCase__ = 1024
lowerCAmelCase__ = 4096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
elif vit_name[4:].startswith('huge' ):
lowerCAmelCase__ = 1280
lowerCAmelCase__ = 5120
lowerCAmelCase__ = 32
lowerCAmelCase__ = 16
# load original model from timm
lowerCAmelCase__ = timm.create_model(UpperCamelCase_ , pretrained=UpperCamelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ = timm_model.state_dict()
if base_model:
remove_classification_head_(UpperCamelCase_ )
lowerCAmelCase__ = create_rename_keys(UpperCamelCase_ , UpperCamelCase_ )
for src, dest in rename_keys:
rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
read_in_q_k_v(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCAmelCase__ = ViTModel(UpperCamelCase_ ).eval()
else:
lowerCAmelCase__ = ViTForImageClassification(UpperCamelCase_ ).eval()
model.load_state_dict(UpperCamelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCAmelCase__ = DeiTImageProcessor(size=config.image_size )
else:
lowerCAmelCase__ = ViTImageProcessor(size=config.image_size )
lowerCAmelCase__ = image_processor(images=prepare_img() , return_tensors='pt' )
lowerCAmelCase__ = encoding['pixel_values']
lowerCAmelCase__ = model(UpperCamelCase_ )
if base_model:
lowerCAmelCase__ = timm_model.forward_features(UpperCamelCase_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(UpperCamelCase_ , outputs.pooler_output , atol=1e-3 )
else:
lowerCAmelCase__ = timm_model(UpperCamelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase_ , outputs.logits , atol=1e-3 )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
__snake_case : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
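    # Example invocation (a sketch; the script filename and output path are
    # illustrative, only the two flags are defined by this parser):
    #   python convert_vit_timm_to_pytorch.py \
    #       --vit_name vit_base_patch16_224 \
    #       --pytorch_dump_folder_path ./vit-base-patch16-224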
| 122 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__snake_case : Tuple = """sshleifer/mar_enro_6_3_student"""
class __SCREAMING_SNAKE_CASE ( __lowercase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=_UpperCamelCase , )
lowerCAmelCase__ = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
MarianMTModel.from_pretrained(_UpperCamelCase )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
lowerCAmelCase__ = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
lowerCAmelCase__ = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
lowerCAmelCase__ = bash_script.replace(_UpperCamelCase , str(_UpperCamelCase ) )
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowerCAmelCase__ = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowerCAmelCase__ = ['finetune.py'] + bash_script.split() + args
with patch.object(_UpperCamelCase , 'argv' , _UpperCamelCase ):
lowerCAmelCase__ = argparse.ArgumentParser()
lowerCAmelCase__ = pl.Trainer.add_argparse_args(_UpperCamelCase )
lowerCAmelCase__ = SummarizationModule.add_model_specific_args(_UpperCamelCase , os.getcwd() )
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = main(_UpperCamelCase )
# Check metrics
lowerCAmelCase__ = load_json(model.metrics_save_path )
lowerCAmelCase__ = metrics['val'][0]
lowerCAmelCase__ = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , _UpperCamelCase )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
        # generation must not hang; a slow gen_time usually means a bad config was saved
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCAmelCase__ = os.listdir(_UpperCamelCase )
lowerCAmelCase__ = [x for x in contents if x.endswith('.ckpt' )][0]
lowerCAmelCase__ = os.path.join(args.output_dir , _UpperCamelCase )
lowerCAmelCase__ = torch.load(_UpperCamelCase , map_location='cpu' )
lowerCAmelCase__ = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCAmelCase__ = {os.path.basename(_UpperCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class __SCREAMING_SNAKE_CASE ( __lowercase):
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = F"{self.test_file_dir_str}/test_data/wmt_en_ro"
lowerCAmelCase__ = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 1_28,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
lowerCAmelCase__ = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
lowerCAmelCase__ = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
lowerCAmelCase__ = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
lowerCAmelCase__ = bash_script.replace(_UpperCamelCase , str(_UpperCamelCase ) )
lowerCAmelCase__ = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ = bash_script.replace('--fp16' , '' )
lowerCAmelCase__ = 6
lowerCAmelCase__ = (
['distillation.py']
+ bash_script.split()
+ [
F"--output_dir={output_dir}",
'--gpus=1',
'--learning_rate=1e-3',
F"--num_train_epochs={epochs}",
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(_UpperCamelCase , 'argv' , _UpperCamelCase ):
lowerCAmelCase__ = argparse.ArgumentParser()
lowerCAmelCase__ = pl.Trainer.add_argparse_args(_UpperCamelCase )
lowerCAmelCase__ = SummarizationDistiller.add_model_specific_args(_UpperCamelCase , os.getcwd() )
lowerCAmelCase__ = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowerCAmelCase__ = distill_main(_UpperCamelCase )
# Check metrics
lowerCAmelCase__ = load_json(model.metrics_save_path )
lowerCAmelCase__ = metrics['val'][0]
lowerCAmelCase__ = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # BLEU must improve, otherwise the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # generation must not hang; a slow gen_time usually means a bad config was saved
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , _UpperCamelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCAmelCase__ = os.listdir(_UpperCamelCase )
lowerCAmelCase__ = [x for x in contents if x.endswith('.ckpt' )][0]
lowerCAmelCase__ = os.path.join(args.output_dir , _UpperCamelCase )
lowerCAmelCase__ = torch.load(_UpperCamelCase , map_location='cpu' )
lowerCAmelCase__ = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCAmelCase__ = {os.path.basename(_UpperCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 122 | 1 |
from collections.abc import Callable
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: float = a
SCREAMING_SNAKE_CASE_: float = b
    if function(_UpperCAmelCase ) == 0: # a or b is already a root of the function
return a
elif function(_UpperCAmelCase ) == 0:
return b
elif (
function(_UpperCAmelCase ) * function(_UpperCAmelCase ) > 0
    ): # if f(a) and f(b) have the same sign, the interval does not bracket
        # a root, so this algorithm can't find one
raise ValueError("could not find root in given interval." )
else:
SCREAMING_SNAKE_CASE_: float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7: # until the bracket is narrower than 10^-7
if function(_UpperCAmelCase ) == 0:
return mid
elif function(_UpperCAmelCase ) * function(_UpperCAmelCase ) < 0:
SCREAMING_SNAKE_CASE_: Dict = mid
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] = mid
SCREAMING_SNAKE_CASE_: Dict = start + (end - start) / 2.0
return mid
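# A quick numerical sanity check (a sketch; the helper name is hypothetical):
# for f(x) = x**3 - 2*x - 5 we have f(1) = -6 < 0 and f(3) = 16 > 0, so a
# halving loop like the one above converges to the real root x ~ 2.0945515.
def _demo_bisection() -> float:
    low, high = 1.0, 3.0
    while high - low > 10**-7:
        mid = (low + high) / 2.0
        if (low**3 - 2 * low - 5) * (mid**3 - 2 * mid - 5) <= 0:
            high = mid  # the root lies in [low, mid]
        else:
            low = mid  # the root lies in [mid, high]
    return (low + high) / 2.0  # ~2.0945515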
def A_ ( _UpperCAmelCase ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 13 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
lowerCAmelCase : Optional[int] = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
lowerCAmelCase : Optional[Any] = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A_ ( ):
SCREAMING_SNAKE_CASE_: Any = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE_: Tuple = bs[:]
SCREAMING_SNAKE_CASE_: str = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_: Optional[int] = [chr(_UpperCAmelCase ) for n in cs]
return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) )
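# Hedged illustration of the mapping built above: the 188 printable latin-1
# bytes map to themselves, while the remaining 68 bytes are shifted past 255
# in order of occurrence. The space byte (32) is the 33rd shifted byte, so it
# becomes chr(256 + 32) == "Ġ", the familiar leading-space marker in
# GPT-2/RoBERTa-style vocabularies.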
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = set()
SCREAMING_SNAKE_CASE_: Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_: Tuple = char
return pairs
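# For example (a sketch): the word tuple ("h", "e", "l", "l", "o") yields the
# pairs {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, which the BPE loop
# below ranks against self.bpe_ranks to pick the next merge.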
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]="replace" , lowerCAmelCase__ : Optional[Any]="<s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Optional[Any]="</s>" , lowerCAmelCase__ : int="<s>" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Any="<mask>" , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : Tuple , ):
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token
SCREAMING_SNAKE_CASE_: str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token
SCREAMING_SNAKE_CASE_: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token
SCREAMING_SNAKE_CASE_: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="utf-8") as vocab_handle:
SCREAMING_SNAKE_CASE_: Tuple = json.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_: List[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="utf-8") as merges_handle:
SCREAMING_SNAKE_CASE_: List[Any] = merges_handle.read().split("\n")[1:-1]
SCREAMING_SNAKE_CASE_: str = [tuple(merge.split()) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_: List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_: List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : int):
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[str]):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_: Optional[int] = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_: int = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("inf")))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = bigram
SCREAMING_SNAKE_CASE_: Optional[int] = []
SCREAMING_SNAKE_CASE_: List[Any] = 0
while i < len(lowerCAmelCase__):
try:
SCREAMING_SNAKE_CASE_: List[Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_: Tuple = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_: str = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = new_word
if len(lowerCAmelCase__) == 1:
break
else:
SCREAMING_SNAKE_CASE_: Dict = get_pairs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = " ".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = word
return word
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(" "))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any]):
return self.decoder.get(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = "".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + "\n")
SCREAMING_SNAKE_CASE_: List[Any] = 0
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
SCREAMING_SNAKE_CASE_: List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__) + "\n")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: Optional[int] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=False , **lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: List[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_: Optional[Any] = " " + text
return (text, kwargs)
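# Hedged sketch of the special-token layout produced by the methods above
# (the helper name is hypothetical; ids 0 and 2 match the usual <s>/</s> ids
# in RoBERTa/Longformer vocabularies):
def _demo_special_token_layout() -> None:
    cls_id, sep_id = 0, 2
    seq_a, seq_b = [10, 11], [20, 21]
    single = [cls_id] + seq_a + [sep_id]  # <s> A </s>
    pair = [cls_id] + seq_a + [sep_id, sep_id] + seq_b + [sep_id]  # <s> A </s></s> B </s>
    assert single == [0, 10, 11, 2]
    assert pair == [0, 10, 11, 2, 2, 20, 21, 2]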
| 13 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__lowercase: List[str] = pd.read_csv("sample_data.csv", header=None)
__lowercase: Tuple = df.shape[:1][0]
    # If you're using another dataset, select its target column here
__lowercase: Dict = df.iloc[:, 1:2]
__lowercase: Any = actual_data.values.reshape(len_data, 1)
__lowercase: Optional[Any] = MinMaxScaler().fit_transform(actual_data)
__lowercase: Optional[Any] = 10
__lowercase: Dict = 5
__lowercase: Union[str, Any] = 20
__lowercase: Tuple = len_data - periods * look_back
__lowercase: Any = actual_data[:division]
__lowercase: Dict = actual_data[division - look_back :]
__lowercase ,__lowercase: Dict = [], []
__lowercase ,__lowercase: Optional[int] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
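    # Window-shape sketch for the loops above: with look_back = 10 and
    # forward_days = 5, train_x[0] covers train_data[0:10] and train_y[0]
    # covers train_data[10:15], so each input window predicts the next 5 steps.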
__lowercase: Dict = np.array(train_x)
__lowercase: Any = np.array(test_x)
__lowercase: Optional[Any] = np.array([list(i.ravel()) for i in train_y])
__lowercase: List[Any] = np.array([list(i.ravel()) for i in test_y])
__lowercase: Tuple = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
__lowercase: str = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
    __lowercase: Optional[int] = model.predict(x_test)
| 366 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
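# A self-contained sketch of the conv-kernel case above (sizes illustrative,
# helper name hypothetical): PyTorch stores conv weights as (out, in, kh, kw)
# while Flax expects (kh, kw, in, out), hence the transpose(2, 3, 1, 0).
def _demo_conv_kernel_transpose() -> None:
    import numpy as np

    pt_kernel = np.zeros((8, 3, 5, 5))  # (out_channels, in_channels, kh, kw)
    flax_kernel = pt_kernel.transpose(2, 3, 1, 0)
    assert flax_kernel.shape == (5, 5, 3, 8)  # (kh, kw, in, out)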
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy
        # cannot handle bf16 and bf16 is not yet fully supported in PyTorch.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase__ = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
    return pt_model
| 31 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[int] ):
_UpperCAmelCase : Dict = tempfile.mkdtemp()
_UpperCAmelCase : Any = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCAmelCase : int = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A , A )
def _A ( self : Dict , **A : List[Any] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def _A ( self : List[str] , **A : Tuple ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def _A ( self : Dict , **A : Dict ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **A )
def _A ( self : str ):
shutil.rmtree(self.tmpdirname )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_UpperCAmelCase : Any = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _A ( self : List[str] ):
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = self.get_rust_tokenizer()
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : Optional[Any] = AlignProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCAmelCase : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=A )
_UpperCAmelCase : List[Any] = AlignProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def _A ( self : List[Any] ):
_UpperCAmelCase : Tuple = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : Optional[int] = self.get_image_processor(do_normalize=A , padding_value=1.0 )
_UpperCAmelCase : Optional[int] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def _A ( self : List[str] ):
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : str = AlignProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Optional[int] = self.prepare_image_inputs()
_UpperCAmelCase : Dict = image_processor(A , return_tensors="np" )
_UpperCAmelCase : Optional[Any] = processor(images=A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self : Tuple ):
_UpperCAmelCase : List[Any] = self.get_image_processor()
_UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = AlignProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : List[str] = "lower newer"
_UpperCAmelCase : Any = processor(text=A )
_UpperCAmelCase : Optional[Any] = tokenizer(A , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _A ( self : int ):
_UpperCAmelCase : Optional[int] = self.get_image_processor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : str = AlignProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Tuple = "lower newer"
_UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs()
_UpperCAmelCase : Dict = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def _A ( self : int ):
_UpperCAmelCase : Optional[int] = self.get_image_processor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : str = AlignProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : str = processor.batch_decode(A )
_UpperCAmelCase : Optional[Any] = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def _A ( self : Tuple ):
_UpperCAmelCase : int = self.get_image_processor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = AlignProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : List[Any] = "lower newer"
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 31 | '''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
'''simple docstring'''
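    # Helper that builds a small BioGPT config plus synthetic input tensors
    # (ids, masks, labels) for the model tests below.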
def __init__( self : List[Any] , A : Dict , A : Optional[Any]=13 , A : Optional[Any]=7 , A : Union[str, Any]=True , A : Optional[Any]=True , A : int=False , A : str=True , A : Optional[Any]=99 , A : Union[str, Any]=32 , A : int=5 , A : Tuple=4 , A : Union[str, Any]=37 , A : Dict="gelu" , A : Union[str, Any]=0.1 , A : str=0.1 , A : Union[str, Any]=512 , A : int=16 , A : List[str]=2 , A : Tuple=0.02 , A : int=3 , A : List[str]=4 , A : str=None , ):
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : int = seq_length
_UpperCAmelCase : Union[str, Any] = is_training
_UpperCAmelCase : Any = use_input_mask
_UpperCAmelCase : Optional[Any] = use_token_type_ids
_UpperCAmelCase : str = use_labels
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Union[str, Any] = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : Union[str, Any] = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : str = type_vocab_size
_UpperCAmelCase : str = type_sequence_label_size
_UpperCAmelCase : int = initializer_range
_UpperCAmelCase : Optional[Any] = num_labels
_UpperCAmelCase : List[str] = num_choices
_UpperCAmelCase : List[str] = scope
def _A ( self : Optional[int] ):
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
_UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Any = None
if self.use_token_type_ids:
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : Optional[int] = None
if self.use_labels:
_UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A ( self : Dict ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def _A ( self : int , A : List[Any] , A : Any , A : int , A : Union[str, Any] , A : Dict , A : List[Any] , A : Dict ):
_UpperCAmelCase : List[str] = BioGptModel(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Tuple = model(A , attention_mask=A )
_UpperCAmelCase : int = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self : List[Any] , A : str , A : List[Any] , A : Dict , A : List[Any] , A : List[str] , A : Union[str, Any] , A : int , A : List[str] , A : Dict , ):
_UpperCAmelCase : Optional[int] = BioGptForCausalLM(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
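    # The next check exercises incremental decoding: a full forward pass must match
    # a cached forward pass (past_key_values) on the newly appended token.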
def _A ( self : List[Any] , A : str , A : str , A : str , A : Any , A : List[str] , *A : Optional[int] ):
_UpperCAmelCase : str = BioGptModel(config=A )
model.to(A )
model.eval()
# create attention mask
_UpperCAmelCase : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=A )
_UpperCAmelCase : Optional[int] = self.seq_length // 2
_UpperCAmelCase : List[Any] = 0
# first forward pass
_UpperCAmelCase , _UpperCAmelCase : List[str] = model(A , attention_mask=A ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
_UpperCAmelCase : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
_UpperCAmelCase : List[str] = ids_tensor((1,) , A ).item() + 1
_UpperCAmelCase : str = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
_UpperCAmelCase : Any = random_other_next_tokens
# append to next input_ids and attn_mask
_UpperCAmelCase : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase : Optional[int] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=A )] , dim=1 , )
# get two different outputs
_UpperCAmelCase : List[Any] = model(A , attention_mask=A )["last_hidden_state"]
_UpperCAmelCase : Optional[Any] = model(A , past_key_values=A , attention_mask=A )["last_hidden_state"]
# select random slice
_UpperCAmelCase : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
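    # Same cache-consistency check as above, but appending three tokens at once.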
def _A ( self : int , A : Dict , A : str , A : Dict , A : Union[str, Any] , A : Any , *A : Union[str, Any] ):
_UpperCAmelCase : Optional[Any] = BioGptModel(config=A ).to(A ).eval()
_UpperCAmelCase : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=A )
# first forward pass
_UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , use_cache=A )
_UpperCAmelCase , _UpperCAmelCase : Dict = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
_UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , 2 )
        # append the new tokens to input_ids and the attention mask
_UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase : Dict = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_UpperCAmelCase : Any = model(A , attention_mask=A )["last_hidden_state"]
_UpperCAmelCase : Dict = model(A , attention_mask=A , past_key_values=A )[
"last_hidden_state"
]
# select random slice
_UpperCAmelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
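    # Verifies loss/logits shapes and that backpropagation runs, optionally with
    # gradient checkpointing enabled.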
def _A ( self : Optional[Any] , A : Tuple , A : List[str] , A : Tuple , A : Dict , A : List[Any] , *A : Tuple , A : List[str]=False ):
_UpperCAmelCase : Optional[int] = BioGptForCausalLM(A )
model.to(A )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_UpperCAmelCase : Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
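    # BioGPT rescales its output projections at init: every c_proj weight should have
    # std close to initializer_range / sqrt(2 * num_hidden_layers) and mean close to 0.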
def _A ( self : Optional[Any] , A : Any , *A : Optional[Any] ):
_UpperCAmelCase : Tuple = BioGptModel(A )
_UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _A ( self : Optional[int] , A : Dict , A : Tuple , A : Optional[int] , A : int , A : List[str] , *A : Dict ):
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Any = BioGptForTokenClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(A , attention_mask=A , token_type_ids=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A ( self : int ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
_UpperCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: List[str] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__UpperCamelCase: List[str] = (BioGptForCausalLM,) if is_torch_available() else ()
__UpperCamelCase: str = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase: Union[str, Any] = False
def _A ( self : Optional[Any] ):
_UpperCAmelCase : List[Any] = BioGptModelTester(self )
_UpperCAmelCase : str = ConfigTester(self , config_class=A , hidden_size=37 )
def _A ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _A ( self : Any ):
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _A ( self : Any ):
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase : Tuple = type
self.model_tester.create_and_check_model(*A )
def _A ( self : int ):
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*A )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*A , gradient_checkpointing=A )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*A )
def _A ( self : Dict ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*A )
def _A ( self : Dict ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*A )
@slow
def _A ( self : List[str] ):
_UpperCAmelCase : Optional[Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(A )
_UpperCAmelCase : Tuple = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
_UpperCAmelCase : str = "left"
        # Define PAD token = EOS token
_UpperCAmelCase : Any = tokenizer.eos_token
_UpperCAmelCase : int = model.config.eos_token_id
# use different length sentences to test batching
_UpperCAmelCase : Any = [
"Hello, my dog is a little",
"Today, I",
]
_UpperCAmelCase : Tuple = tokenizer(A , return_tensors="pt" , padding=A )
_UpperCAmelCase : Optional[Any] = inputs["input_ids"].to(A )
_UpperCAmelCase : Any = model.generate(
input_ids=A , attention_mask=inputs["attention_mask"].to(A ) , )
_UpperCAmelCase : int = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(A )
_UpperCAmelCase : List[Any] = model.generate(input_ids=A )
_UpperCAmelCase : List[Any] = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
_UpperCAmelCase : int = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(A )
_UpperCAmelCase : int = model.generate(input_ids=A , max_length=model.config.max_length - num_paddings )
_UpperCAmelCase : Dict = tokenizer.batch_decode(A , skip_special_tokens=A )
_UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A )
_UpperCAmelCase : Optional[int] = tokenizer.decode(output_padded[0] , skip_special_tokens=A )
_UpperCAmelCase : str = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(A , A )
self.assertListEqual(A , [non_padded_sentence, padded_sentence] )
@slow
def _A ( self : str ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] = BioGptModel.from_pretrained(A )
self.assertIsNotNone(A )
def _A ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : str = 3
_UpperCAmelCase : List[str] = input_dict["input_ids"]
_UpperCAmelCase : Dict = input_ids.ne(1 ).to(A )
_UpperCAmelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCAmelCase : List[str] = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : List[str] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _A ( self : int ):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : int = 3
_UpperCAmelCase : Dict = "multi_label_classification"
_UpperCAmelCase : Optional[Any] = input_dict["input_ids"]
_UpperCAmelCase : Optional[int] = input_ids.ne(1 ).to(A )
_UpperCAmelCase : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_UpperCAmelCase : Optional[Any] = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
@slow
def _A ( self : List[Any] ):
_UpperCAmelCase : Optional[Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
_UpperCAmelCase : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
_UpperCAmelCase : List[Any] = model(A )[0]
_UpperCAmelCase : int = 42384
_UpperCAmelCase : int = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , A )
_UpperCAmelCase : Any = torch.tensor(
[[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1E-4 ) )
@slow
def _A ( self : Any ):
_UpperCAmelCase : str = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
_UpperCAmelCase : Tuple = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(A )
torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = tokenizer("COVID-19 is" , return_tensors="pt" ).to(A )
_UpperCAmelCase : Dict = model.generate(
**A , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=A , )
_UpperCAmelCase : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=A )
_UpperCAmelCase : List[str] = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(A , A )
| 31 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : Union[str, Any] = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowercase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 366 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 285 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Dict = (DDIMParallelScheduler,)
_snake_case : List[Any] = (('''eta''', 0.0), ('''num_inference_steps''', 5_0))
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**_UpperCamelCase )
return config
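    # Runs a short deterministic denoising loop (10 steps, eta=0.0) with the dummy
    # model; the returned sample is compared against reference sums/means below.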
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> int:
UpperCAmelCase_ : int = self.scheduler_classes[0]
UpperCAmelCase_ : Dict = self.get_scheduler_config(**_UpperCamelCase )
UpperCAmelCase_ : Dict = scheduler_class(**_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = 1_0, 0.0
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCamelCase )
for t in scheduler.timesteps:
UpperCAmelCase_ : str = model(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample
return sample
def __UpperCAmelCase ( self ) -> List[str]:
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_UpperCamelCase )
UpperCAmelCase_ : Any = self.scheduler_classes[0]
UpperCAmelCase_ : Dict = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : str = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def __UpperCAmelCase ( self ) -> Optional[Any]:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Any:
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
self.check_over_configs(thresholding=_UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCamelCase , prediction_type=_UpperCamelCase , sample_max_value=_UpperCamelCase , )
def __UpperCAmelCase ( self ) -> int:
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Tuple:
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=_UpperCamelCase , num_inference_steps=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_UpperCamelCase , eta=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.scheduler_classes[0]
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : Any = scheduler_class(**_UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.1_47_71 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.3_24_60 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1E-5
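    # batch_step_no_noise should denoise a batch of three perturbed copies of the
    # sample in one call; the result is checked against precomputed reference values.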
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[Any] = self.scheduler_classes[0]
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = 1_0, 0.0
scheduler.set_timesteps(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
UpperCAmelCase_ : str = self.dummy_sample_deter + 0.1
UpperCAmelCase_ : List[Any] = self.dummy_sample_deter - 0.1
UpperCAmelCase_ : List[str] = samplea.shape[0]
UpperCAmelCase_ : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCAmelCase_ : str = torch.arange(_UpperCamelCase )[0:3, None].repeat(1 , _UpperCamelCase )
UpperCAmelCase_ : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCAmelCase_ : Dict = scheduler.batch_step_no_noise(_UpperCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase ) )
UpperCAmelCase_ : List[Any] = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
assert abs(result_mean.item() - 0.49_82 ) < 1E-3
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Optional[int] = self.full_loop()
UpperCAmelCase_ : List[str] = torch.sum(torch.abs(_UpperCamelCase ) )
UpperCAmelCase_ : Any = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.full_loop(prediction_type='v_prediction' )
UpperCAmelCase_ : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase ) )
UpperCAmelCase_ : List[str] = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1E-2
assert abs(result_mean.item() - 0.06_84 ) < 1E-3
def __UpperCAmelCase ( self ) -> Tuple:
        # We specify a different beta so that the first alpha is 0.99
UpperCAmelCase_ : List[str] = self.full_loop(set_alpha_to_one=_UpperCamelCase , beta_start=0.01 )
UpperCAmelCase_ : int = torch.sum(torch.abs(_UpperCamelCase ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
assert abs(result_mean.item() - 0.19_51 ) < 1E-3
def __UpperCAmelCase ( self ) -> Union[str, Any]:
        # We specify a different beta so that the first alpha is 0.99
UpperCAmelCase_ : Dict = self.full_loop(set_alpha_to_one=_UpperCamelCase , beta_start=0.01 )
UpperCAmelCase_ : Optional[Any] = torch.sum(torch.abs(_UpperCamelCase ) )
UpperCAmelCase_ : Optional[Any] = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2
assert abs(result_mean.item() - 0.19_41 ) < 1E-3
| 29 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
def __init__( self : Any, __A : str, __A : Dict=1_3, __A : int=3_0, __A : Tuple=2, __A : Union[str, Any]=3, __A : Any=True, __A : str=True, __A : Dict=3_2, __A : List[Any]=2, __A : Optional[Any]=4, __A : Union[str, Any]=3_7, __A : int="gelu", __A : int=0.1, __A : List[Any]=0.1, __A : Tuple=1_0, __A : Tuple=0.0_2, __A : Any=3, __A : List[str]=0.6, __A : Any=None, ):
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Optional[int] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Tuple = mask_ratio
UpperCAmelCase : Any = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
# (we add 1 for the [CLS] token)
UpperCAmelCase : Tuple = (image_size // patch_size) ** 2
UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
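        # e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91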
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Optional[Any] ):
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__A, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def __magic_name__ ( self : str, __A : List[Any], __A : Any, __A : Any ):
UpperCAmelCase : Optional[Any] = TFViTMAEModel(config=__A )
UpperCAmelCase : Tuple = model(__A, training=__A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple, __A : str, __A : int, __A : str ):
UpperCAmelCase : Dict = TFViTMAEForPreTraining(__A )
UpperCAmelCase : int = model(__A, training=__A )
# expected sequence length = num_patches
UpperCAmelCase : int = (self.image_size // self.patch_size) ** 2
UpperCAmelCase : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase : Tuple = 1
UpperCAmelCase : List[Any] = TFViTMAEForPreTraining(__A )
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = model(__A, training=__A )
UpperCAmelCase : Union[str, Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
def __magic_name__ ( self : List[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = TFViTMAEModelTester(self )
UpperCAmelCase : int = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A, tf.keras.layers.Layer ) )
def __magic_name__ ( self : str ):
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Any = model_class(__A )
UpperCAmelCase : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : int = [*signature.parameters.keys()]
UpperCAmelCase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __magic_name__ ( self : int ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : Dict = model(__A, noise=__A )
UpperCAmelCase : Any = copy.deepcopy(self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Union[str, Any] = model(**__A, noise=__A )
UpperCAmelCase : Dict = outputs_dict[0].numpy()
UpperCAmelCase : Tuple = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1E-6 )
def __magic_name__ ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A : Union[str, Any] ):
UpperCAmelCase : str = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__A ):
UpperCAmelCase : Tuple = v.numpy()
else:
UpperCAmelCase : str = np.array(__A )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : Any = self._prepare_for_class(__A, __A )
UpperCAmelCase : Optional[int] = prepare_numpy_arrays(__A )
UpperCAmelCase : str = model(__A, noise=__A )
UpperCAmelCase : str = model(**__A, noise=__A )
self.assert_outputs_same(__A, __A )
def __magic_name__ ( self : int, __A : str, __A : Union[str, Any], __A : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase : Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : int = tf.constant(__A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase : List[Any] = tf_noise
super().check_pt_tf_models(__A, __A, __A )
def __magic_name__ ( self : str ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
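        # collect every keras-serializable `...MainLayer` class that corresponds to one of
        # the model classes under test, so it can be wrapped in a functional Keras model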
UpperCAmelCase : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__A )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__A, __A ),)
if isinstance(__A, __A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__A, '''_keras_serializable''', __A )
}
UpperCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase : str = tf.convert_to_tensor(__A )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase : Tuple = main_layer_class(__A )
UpperCAmelCase : int = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase : List[Any] = tf.keras.Model(__A, outputs=main_layer(__A ) )
UpperCAmelCase : List[Any] = model(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__A, '''keras_model.h5''' )
model.save(__A )
UpperCAmelCase : List[str] = tf.keras.models.load_model(
__A, custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__A, tf.keras.Model )
UpperCAmelCase : Tuple = model(__A )
self.assert_outputs_same(__A, __A )
@slow
def __magic_name__ ( self : Dict ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(__A )
UpperCAmelCase : List[str] = self._prepare_for_class(__A, __A )
UpperCAmelCase : Union[str, Any] = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : Optional[int] = outputs.last_hidden_state.numpy()
UpperCAmelCase : Union[str, Any] = 0
else:
UpperCAmelCase : Optional[int] = outputs.logits.numpy()
UpperCAmelCase : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A, saved_model=__A )
UpperCAmelCase : Dict = model_class.from_pretrained(__A )
UpperCAmelCase : str = model(__A, noise=__A )
if model_class.__name__ == "TFViTMAEModel":
UpperCAmelCase : int = after_outputs['''last_hidden_state'''].numpy()
UpperCAmelCase : Dict = 0
else:
UpperCAmelCase : Any = after_outputs['''logits'''].numpy()
UpperCAmelCase : Dict = 0
UpperCAmelCase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A, 1E-5 )
def __magic_name__ ( self : Optional[Any] ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(__A )
UpperCAmelCase : int = self._prepare_for_class(__A, __A )
UpperCAmelCase : List[Any] = model(__A, noise=__A )
UpperCAmelCase : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__A )
UpperCAmelCase : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase : str = model_class.from_config(model.config )
UpperCAmelCase : List[str] = new_model(__A ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase : Tuple = new_model(__A, noise=__A )
self.assert_outputs_same(__A, __A )
    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __magic_name__ ( self : Tuple ):
pass
@slow
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__A )
def a__ ( ) -> Dict:
UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[str] ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase : Tuple = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : str = image_processor(images=__A, return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase : Optional[int] = ViTMAEConfig()
UpperCAmelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCAmelCase : Optional[int] = model(**__A, noise=__A )
# verify the logits
UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : List[str] = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3], __A, atol=1E-4 )
| 336 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE : str = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
_SCREAMING_SNAKE_CASE : int = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
_SCREAMING_SNAKE_CASE : str = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class UpperCAmelCase__ ( a_ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = PRETRAINED_INIT_CONFIGURATION
a = ["input_ids", "attention_mask"]
a = DistilBertTokenizer
def __init__( self : Any , __lowerCamelCase : List[str]=None , __lowerCamelCase : str=None , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]="[UNK]" , __lowerCamelCase : Any="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : str="[CLS]" , __lowerCamelCase : str="[MASK]" , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[Any]=None , **__lowerCamelCase : str , ) -> Optional[Any]:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __lowerCamelCase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ = do_lower_case
SCREAMING_SNAKE_CASE__ = strip_accents
SCREAMING_SNAKE_CASE__ = tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ = normalizer_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = do_lower_case
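    # Special-token layout below follows the standard BERT scheme:
    # [CLS] sequence [SEP] (+ optional second sequence + [SEP]).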
def lowercase_ ( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Any = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
| 350 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "decision_transformer"
a = ["past_key_values"]
a = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , __lowerCamelCase : Any=17 , __lowerCamelCase : Any=4 , __lowerCamelCase : List[Any]=128 , __lowerCamelCase : Union[str, Any]=4096 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Any=1 , __lowerCamelCase : List[Any]=1024 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=1 , __lowerCamelCase : List[Any]=None , __lowerCamelCase : str="relu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=1e-5 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : int=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=5_0256 , __lowerCamelCase : Tuple=5_0256 , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , **__lowerCamelCase : Tuple , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = state_dim
SCREAMING_SNAKE_CASE__ = act_dim
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = max_ep_len
SCREAMING_SNAKE_CASE__ = action_tanh
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = n_positions
SCREAMING_SNAKE_CASE__ = n_layer
SCREAMING_SNAKE_CASE__ = n_head
SCREAMING_SNAKE_CASE__ = n_inner
SCREAMING_SNAKE_CASE__ = activation_function
SCREAMING_SNAKE_CASE__ = resid_pdrop
SCREAMING_SNAKE_CASE__ = embd_pdrop
SCREAMING_SNAKE_CASE__ = attn_pdrop
SCREAMING_SNAKE_CASE__ = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = scale_attn_weights
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE__ = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE__ = bos_token_id
SCREAMING_SNAKE_CASE__ = eos_token_id
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
| 218 | 0 |
'''simple docstring'''
def odd_even_transposition(arr: list) -> list:
    """Sort in place with odd-even transposition (brick) sort and return the list."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
a__ : Union[str, Any] = list(range(10, 0, -1))
print(f'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
| 53 |
'''simple docstring'''
from math import sqrt
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
lowerCAmelCase__ : int = True
    # 0 and 1 are not primes.
if number <= 1:
lowerCAmelCase__ : Optional[Any] = False
for divisor in range(2 , int(round(sqrt(UpperCamelCase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCAmelCase__ : Any = False
break
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'status' must been from type bool"
return status
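# intended behaviour of the primality check above: e.g. 29 -> True, 0 and 1 -> False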
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase__ : List[str] = list(range(2 , n + 1 ) )
    lowerCAmelCase__ : str = []  # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(UpperCamelCase ) ):
for j in range(i + 1 , len(UpperCamelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase__ : List[Any] = 0
# filters actual prime numbers.
lowerCAmelCase__ : List[Any] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type list"
return ans
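# the sieve above is intended to return, e.g., [2, 3, 5, 7] for n = 10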
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase__ : List[str] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(UpperCamelCase ):
ans.append(UpperCamelCase )
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type list"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
    assert isinstance(UpperCamelCase , UpperCamelCase ) and number >= 0, "'number' must be an int and >= 0"
    lowerCAmelCase__ : Optional[Any] = []  # this list will be returned by the function.
# potential prime number factors.
lowerCAmelCase__ : Dict = 2
lowerCAmelCase__ : Dict = number
if number == 0 or number == 1:
ans.append(UpperCamelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(UpperCamelCase ):
while quotient != 1:
if is_prime(UpperCamelCase ) and (quotient % factor == 0):
ans.append(UpperCamelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(UpperCamelCase )
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type list"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase__ : Optional[int] = 0
# prime factorization of 'number'
lowerCAmelCase__ : List[str] = prime_factorization(UpperCamelCase )
lowerCAmelCase__ : Any = max(UpperCamelCase )
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type int"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase__ : List[Any] = 0
# prime factorization of 'number'
lowerCAmelCase__ : List[str] = prime_factorization(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = min(UpperCamelCase )
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type int"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0 , UpperCamelCase ), "compare bust been from type bool"
return number % 2 == 0
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , UpperCamelCase ), "compare bust been from type bool"
return number % 2 != 0
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
assert (
isinstance(UpperCamelCase , UpperCamelCase ) and (number > 2) and is_even(UpperCamelCase )
), "'number' must been an int, even and > 2"
lowerCAmelCase__ : Dict = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase__ : Dict = get_prime_numbers(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = len(UpperCamelCase )
# run variable for while-loops.
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : List[Any] = None
# exit variable. for break up the loops
lowerCAmelCase__ : Any = True
while i < len_pn and loop:
lowerCAmelCase__ : List[Any] = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase__ : Optional[Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(UpperCamelCase , UpperCamelCase )
and (len(UpperCamelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
assert (
isinstance(UpperCamelCase , UpperCamelCase )
and isinstance(UpperCamelCase , UpperCamelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase__ : int = 0
while numbera != 0:
lowerCAmelCase__ : Any = numbera % numbera
lowerCAmelCase__ : str = numbera
lowerCAmelCase__ : List[str] = rest
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
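# Euclid's algorithm above; intended behaviour e.g. gcd(24, 40) -> 8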
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
assert (
isinstance(UpperCamelCase , UpperCamelCase )
and isinstance(UpperCamelCase , UpperCamelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
    lowerCAmelCase__ : int = 1  # actual answer that will be returned.
    # covers lcm(x, 1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCAmelCase__ : int = prime_factorization(UpperCamelCase )
lowerCAmelCase__ : Any = prime_factorization(UpperCamelCase )
elif numbera == 1 or numbera == 1:
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : List[str] = max(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : str = 0
    lowerCAmelCase__ : List[Any] = []  # numbers contained in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase__ : int = prime_fac_a.count(UpperCamelCase )
lowerCAmelCase__ : Any = prime_fac_a.count(UpperCamelCase )
for _ in range(max(UpperCamelCase , UpperCamelCase ) ):
ans *= n
else:
lowerCAmelCase__ : Any = prime_fac_a.count(UpperCamelCase )
for _ in range(UpperCamelCase ):
ans *= n
done.append(UpperCamelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase__ : Optional[int] = prime_fac_a.count(UpperCamelCase )
for _ in range(UpperCamelCase ):
ans *= n
done.append(UpperCamelCase )
# precondition
assert isinstance(UpperCamelCase , UpperCamelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def get_prime(n):
    """Returns the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and an int"
    return ans
def get_primes_between(p_number_1, p_number_2):
    """Returns all primes strictly between the two given primes."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans
def get_divisors(n):
    """Returns all divisors of n (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans
def is_perfect_number(number):
    """True if the sum of the proper divisors of 'number' equals 'number'."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # sums all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    """Reduces numerator/denominator by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be ints and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    """Returns n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n):
    """Returns the n-th term of this Fibonacci variant (fib(0) == fib(1) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
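# Hedged smoke test (added for illustration; not part of the original module).
# It assumes `is_prime`, `prime_factorization` and `get_prime_numbers` are the
# helpers defined earlier in this file, as the call sites above suggest.
if __name__ == "__main__":
    assert is_even(10) and is_odd(7)
    assert gcd(24, 36) == 12
    assert kg_v(4, 6) == 12  # lcm via prime factorizations
    assert goldbach(28) == [5, 23]  # 5 + 23 == 28, first pair found
    assert get_divisors(28) == [1, 2, 4, 7, 14, 28] and is_perfect_number(28)
    assert simplify_fraction(6, 8) == (3, 4)
    assert factorial(5) == 120 and fib(10) == 89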
| 37 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch
if is_pytesseract_available():
    from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
        image = Image.open(ds[0]['file']).convert('RGB')
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
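# Hedged usage sketch (added; not part of the original test file):
#   processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#   encoding = processor(image, return_tensors='pt')
#   # encoding.pixel_values -> (1, 3, 224, 224); encoding.words / encoding.boxes
#   # hold the Tesseract words and normalized bounding boxes per image.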
| 169 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Returns True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Returns the reduced sum of the three fractions x, y and z."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
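# Illustrative check (added): 1/3 + 1/3 + 1/3 reduces to 1/1, so
# add_three(1, 3, 1, 3, 1, 3) == (1, 1).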
def solution(order: int = 35) -> int:
    """
    Collects the unique reduced fractions z produced by the four cases below
    (n = 1, 2, -1, -2) and returns numerator + denominator of their total sum.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 169 | 1 |
def get_1s_count(a: int) -> int:
    """Counts the set bits in a non-negative integer."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")
if __name__ == "__main__":
import doctest
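    # Hedged example (added): 13 == 0b1101, so it has three set bits.
    assert get_1s_count(13) == 3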
doctest.testmod()
| 11 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        # prepare() wraps the optimizer in AcceleratedOptimizer; the test
        # ensures that wrapper stays picklable.
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 88 | 0 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 362 |
'''Maximum sum of non-adjacent elements of a list, via dynamic programming.'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Returns the maximum sum of non-adjacent elements of `nums`.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
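    # Hedged example (added): skipping 2 keeps 1 and 3, so the best sum is 4.
    assert maximum_non_adjacent_sum([1, 2, 3]) == 4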
    doctest.testmod()
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    """Returns x unchanged if it is iterable, otherwise the pair (x, x)."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f'Difference between torch and flax is {diff} (>= {tol}).')

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-random-bert'
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name='vision_model')
        text_model = TFBertModel(text_config, name='text_model')
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            'text_config': text_config,
            'vision_config': vision_config,
            'pixel_values': pixel_values,
            'attention_mask': input_mask,
            'input_ids': input_ids,
            'text_token_type_ids': token_type_ids,
            'text_sequence_labels': sequence_labels,
            'text_token_labels': token_labels,
            'text_choice_labels': choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-deit-tf', 'hf-internal-testing/tiny-random-roberta'
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name='vision_model')
        text_model = TFRobertaModel(text_config, name='text_model')
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            'text_config': text_config,
            'vision_config': vision_config,
            'pixel_values': pixel_values,
            'attention_mask': input_mask,
            'input_ids': input_ids,
            'text_token_type_ids': token_type_ids,
            'text_sequence_labels': sequence_labels,
            'text_token_labels': token_labels,
            'text_choice_labels': choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-clip-tf', 'hf-internal-testing/tiny-random-bert'
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name='vision_model')
        text_model = TFBertModel(text_config, name='text_model')
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            'text_config': text_config,
            'vision_config': vision_config,
            'pixel_values': pixel_values,
            'attention_mask': input_mask,
            'input_ids': input_ids,
            'text_token_type_ids': token_type_ids,
            'text_sequence_labels': sequence_labels,
            'text_token_labels': token_labels,
            'text_choice_labels': choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            'clip-italian/clip-italian', logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np'
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
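# Hedged note (added): `from_vision_text_pretrained` pairs any TF vision
# encoder with any TF text encoder behind two learned projections, which is
# why the tests above can mix ViT/DeiT/CLIP backbones with BERT/RoBERTa.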
| 168 |
"""Project Euler problem 8: find the largest product of thirteen adjacent digits."""
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Returns the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
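# Added note: for the 1000-digit constant N above, the winning 13-digit window
# yields solution() == 23514624000 (the published Project Euler 8 answer).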
if __name__ == "__main__":
print(f'''{solution() = }''')
| 168 | 1 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
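# Added caveat: the '.eZt8xd' selector targets Google's current result markup
# and breaks whenever that markup changes; this is a best-effort scraper only.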
| 258 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(config, indent=2) + '\n')
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(vocab, indent=2) + '\n')
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
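# Hedged usage (added; the script filename is illustrative, the flags are the
# ones defined by the argparse setup above):
#   python convert_xlm_checkpoint.py \
#       --xlm_checkpoint_path /path/to/xlm_checkpoint.pth \
#       --pytorch_dump_folder_path /path/to/output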
| 258 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    """Pops `old` from the state dict and re-inserts its value under `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Moves backbone keys under the conv_encoder prefix used by HF models."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Splits each fused encoder in-projection matrix into separate q/k/v projections."""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Fetches the standard COCO cats image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copies/tweaks the original weights into the HF Conditional DETR structure."""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
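# Hedged usage (added; the script filename is illustrative, the flags are the
# ones defined above):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50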
| 130 |
'''simple docstring'''
from __future__ import annotations
_A : Any ={
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Runs breadth-first search from the source vertex, recording parents."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Returns the source-to-target path as a '->'-joined string."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
_A : int =Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 41 | 0 |
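The class above rebuilds the path as a string through recursion and dequeues with `list.pop(0)`, which is O(n) per pop. A sketch of an iterative variant using `collections.deque` that returns the path as a list; all names here are illustrative.

```python
from __future__ import annotations

from collections import deque

def bfs_shortest_path(graph: dict[str, list[str]], source: str, target: str) -> list[str]:
    """Return one shortest path from source to target in an unweighted graph."""
    parent: dict[str, str | None] = {source: None}  # doubles as the visited set
    queue = deque([source])
    while queue:
        vertex = queue.popleft()  # O(1), unlike list.pop(0)
        if vertex == target:
            break
        for neighbor in graph[vertex]:
            if neighbor not in parent:
                parent[neighbor] = vertex
                queue.append(neighbor)
    if target not in parent:
        raise ValueError(f"No path from {source} to {target}")
    path = [target]
    while parent[path[-1]] is not None:
        path.append(parent[path[-1]])
    return path[::-1]

demo = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
print("->".join(bfs_shortest_path(demo, "A", "D")))  # A->B->D
```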
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput (BaseOutput ):
_UpperCamelCase : torch.FloatTensor
class UNetaDModel (ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self : List[Any] , a : int = 65_536 , a : Optional[int] = None , a : int = 2 , a : int = 2 , a : int = 0 , a : str = "fourier" , a : bool = True , a : bool = False , a : float = 0.0 , a : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , a : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , a : Tuple[str] = "UNetMidBlock1D" , a : str = None , a : Tuple[int] = (32, 32, 64) , a : str = None , a : int = 8 , a : int = 1 , a : bool = False , )-> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ = sample_size
# time
if time_embedding_type == "fourier":
lowercase__ = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=a , log=a , flip_sin_to_cos=a )
lowercase__ = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowercase__ = Timesteps(
block_out_channels[0] , flip_sin_to_cos=a , downscale_freq_shift=a )
lowercase__ = block_out_channels[0]
if use_timestep_embedding:
lowercase__ = block_out_channels[0] * 4
lowercase__ = TimestepEmbedding(
in_channels=a , time_embed_dim=a , act_fn=a , out_dim=block_out_channels[0] , )
lowercase__ = nn.ModuleList([] )
lowercase__ = None
lowercase__ = nn.ModuleList([] )
lowercase__ = None
# down
lowercase__ = in_channels
for i, down_block_type in enumerate(a ):
lowercase__ = output_channel
lowercase__ = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowercase__ = i == len(a ) - 1
lowercase__ = get_down_block(
a , num_layers=a , in_channels=a , out_channels=a , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(a )
# mid
lowercase__ = get_mid_block(
a , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=a , add_downsample=a , )
# up
lowercase__ = list(reversed(a ) )
lowercase__ = reversed_block_out_channels[0]
if out_block_type is None:
lowercase__ = out_channels
else:
lowercase__ = block_out_channels[0]
for i, up_block_type in enumerate(a ):
lowercase__ = output_channel
lowercase__ = (
reversed_block_out_channels[i + 1] if i < len(a ) - 1 else final_upsample_channels
)
lowercase__ = i == len(a ) - 1
lowercase__ = get_up_block(
a , num_layers=a , in_channels=a , out_channels=a , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(a )
lowercase__ = output_channel
# out
lowercase__ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowercase__ = get_out_block(
out_block_type=a , num_groups_out=a , embed_dim=block_out_channels[0] , out_channels=a , act_fn=a , fc_dim=block_out_channels[-1] // 4 , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : torch.FloatTensor , a : Union[torch.Tensor, float, int] , a : bool = True , )-> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
lowercase__ = timestep
if not torch.is_tensor(a ):
lowercase__ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(a ) and len(timesteps.shape ) == 0:
lowercase__ = timesteps[None].to(sample.device )
lowercase__ = self.time_proj(a )
if self.config.use_timestep_embedding:
lowercase__ = self.time_mlp(a )
else:
lowercase__ = timestep_embed[..., None]
lowercase__ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowercase__ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowercase__ = ()
for downsample_block in self.down_blocks:
lowercase__ , lowercase__ = downsample_block(hidden_states=a , temb=a )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowercase__ = self.mid_block(a , a )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowercase__ = down_block_res_samples[-1:]
lowercase__ = down_block_res_samples[:-1]
lowercase__ = upsample_block(a , res_hidden_states_tuple=a , temb=a )
# 5. post-process
if self.out_block:
lowercase__ = self.out_block(a , a )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=a )
| 269 |
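The `"positional"` branch above produces the standard sinusoidal timestep embedding before it is broadcast across the sample length. A self-contained sketch of that embedding; the half-sin/half-cos split and the 10,000 base follow the usual convention, while the exact `flip_sin_to_cos`/`downscale_freq_shift` options of the library's `Timesteps` module are omitted here.

```python
import math

import torch

def sinusoidal_timestep_embedding(timesteps: torch.Tensor, dim: int) -> torch.Tensor:
    """Map integer timesteps of shape (batch,) to embeddings of shape (batch, dim)."""
    half = dim // 2
    freqs = torch.exp(-math.log(10_000.0) * torch.arange(half, dtype=torch.float32) / half)
    args = timesteps[:, None].float() * freqs[None, :]  # (batch, half)
    return torch.cat([torch.sin(args), torch.cos(args)], dim=-1)

emb = sinusoidal_timestep_embedding(torch.tensor([0, 10, 100]), dim=32)
print(emb.shape)  # torch.Size([3, 32])
```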
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 269 | 1 |
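The try/except ladder feeds `_import_structure` into `_LazyModule` so that the torch, TensorFlow, and vision backends are imported only when one of their symbols is first accessed. A stripped-down sketch of the same idea using a module-level `__getattr__` (PEP 562); the package layout is hypothetical and not the library's actual implementation.

```python
# lazy_pkg/__init__.py -- defer heavy imports until first attribute access
import importlib

_import_structure = {
    "configuration": ["ExampleConfig"],
    "modeling": ["ExampleModel"],  # assume this submodule imports torch
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    submodule = importlib.import_module(f".{module_name}", __name__)
    return getattr(submodule, name)  # resolved only on first access
```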
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source : Union[str, Any] , target : Any):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def lowercase_ ( _lowerCamelCase : Any):
lowercase__ : Optional[int] = _TestCommandArgs(dataset=_lowerCamelCase , all_configs=_lowerCamelCase , save_infos=_lowerCamelCase)
lowercase__ : int = TestCommand(*_lowerCamelCase)
test_command.run()
lowercase__ : Dict = os.path.join(_lowerCamelCase , "README.md")
assert os.path.exists(_lowerCamelCase)
lowercase__ : str = DatasetInfosDict.from_directory(_lowerCamelCase)
lowercase__ : Dict = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string")),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])),
"langs": Sequence(Value("string")),
"spans": Sequence(Value("string")),
}) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
})
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"] , key), getattr(expected_dataset_infos["default"] , key)
        if key == "num_bytes":
            assert is_apercent_close(result , expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes)
        else:
            assert result == expected
| 87 |
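The `num_bytes` assertions above only need to hold to within 1% relative error, since byte counts drift slightly across environments. The hand-rolled `is_apercent_close` check is essentially `math.isclose` with a relative tolerance:

```python
import math

def is_percent_close(source: float, target: float, pct: float = 0.01) -> bool:
    """True when source is within pct relative error of target."""
    return abs(source - target) / target < pct

assert is_percent_close(2_351_563, 2_360_000)            # within 1% of each other
assert math.isclose(2_351_563, 2_360_000, rel_tol=0.01)  # near-equivalent stdlib form
```

Note that `math.isclose` measures the tolerance against the larger of the two values rather than against `target`, so the two checks can disagree right at the boundary.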
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _lowerCamelCase( _a ):
lowercase_ : Any = VOCAB_FILES_NAMES
lowercase_ : Optional[int] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : str = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _lowerCamelCase( _a ):
lowercase_ : Optional[int] = VOCAB_FILES_NAMES
lowercase_ : Any = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
SCREAMING_SNAKE_CASE : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_a )
class _lowerCamelCase:
def __call__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, **lowerCamelCase, ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
elif titles is None or texts is None:
_lowercase : Dict = titles if texts is None else texts
return super().__call__(
lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
_lowercase : Union[str, Any] = titles if not isinstance(lowerCamelCase, lowerCamelCase) else [titles]
_lowercase : Tuple = texts if not isinstance(lowerCamelCase, lowerCamelCase) else [texts]
_lowercase : Optional[Any] = len(lowerCamelCase)
_lowercase : Any = questions if not isinstance(lowerCamelCase, lowerCamelCase) else [questions] * n_passages
if len(lowerCamelCase) != len(lowerCamelCase):
raise ValueError(
F'''There should be as many titles than texts but got {len(lowerCamelCase)} titles and {len(lowerCamelCase)} texts.''')
_lowercase : Any = super().__call__(lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase)['input_ids']
_lowercase : Tuple = super().__call__(lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase)['input_ids']
_lowercase : int = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase, lowerCamelCase)
]
}
if return_attention_mask is not False:
_lowercase : Optional[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
_lowercase : Union[str, Any] = attention_mask
return self.pad(lowerCamelCase, padding=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 16, lowerCamelCase = 64, lowerCamelCase = 4, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_lowercase : Union[str, Any] = reader_input['input_ids']
_lowercase , _lowercase , _lowercase : Tuple = reader_output[:3]
_lowercase : Tuple = len(lowerCamelCase)
_lowercase : str = sorted(range(lowerCamelCase), reverse=lowerCamelCase, key=relevance_logits.__getitem__)
_lowercase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowercase : str = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
_lowercase : Any = sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowercase : List[Any] = sequence_ids.index(self.pad_token_id)
else:
_lowercase : List[str] = len(lowerCamelCase)
_lowercase : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCamelCase, top_spans=lowerCamelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCamelCase, start_index=lowerCamelCase, end_index=lowerCamelCase, text=self.decode(sequence_ids[start_index : end_index + 1]), ))
if len(lowerCamelCase) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self, start_logits, end_logits, max_answer_length, top_spans, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_lowercase : str = []
for start_index, start_score in enumerate(lowerCamelCase):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
        _lowercase : Dict = sorted(lowerCamelCase, key=lambda x: x[1], reverse=lowerCamelCase)
_lowercase : List[str] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''')
_lowercase : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''')
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowerCamelCase) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class _lowerCamelCase( _a, _a ):
lowercase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase_ : Any = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Dict = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
lowercase_ : str = ["""input_ids""", """attention_mask"""]
| 21 | 0 |
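The `_get_best_spans` logic above scores every candidate answer span as `start_logit + end_logit`, sorts, and keeps the top spans while skipping any span that contains or is contained by an already chosen one. A compact, self-contained sketch of that scoring-and-filtering step; function and variable names are illustrative.

```python
def best_spans(start_logits, end_logits, max_answer_length=10, top_spans=2):
    """Return the top non-overlapping (start, end) index pairs scored by logit sum."""
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        # skip spans nested inside (or enclosing) a higher-scoring chosen span
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 3.0]))  # [(1, 2), (0, 0)]
```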
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE__ = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
"""simple docstring"""
_lowerCAmelCase : Tuple = PegasusConfig
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : int = """gelu"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=20 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=0 , ):
"""simple docstring"""
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = eos_token_id
snake_case = pad_token_id
snake_case = bos_token_id
def snake_case ( self ):
"""simple docstring"""
snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
snake_case = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
snake_case = np.concatenate([input_ids, eos_tensor] , axis=1 )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case = prepare_pegasus_inputs_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
"""simple docstring"""
snake_case = 20
snake_case = model_class_name(lowerCAmelCase )
snake_case = model.encode(inputs_dict['input_ids'] )
snake_case ,snake_case = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
snake_case = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase , lowerCAmelCase )
snake_case = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
snake_case = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
snake_case = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase , )
snake_case = model.decode(lowerCAmelCase , lowerCAmelCase )
snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
"""simple docstring"""
snake_case = 20
snake_case = model_class_name(lowerCAmelCase )
snake_case = model.encode(inputs_dict['input_ids'] )
snake_case ,snake_case = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
snake_case = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
snake_case = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase , lowerCAmelCase )
snake_case = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
snake_case = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
snake_case = model.decode(lowerCAmelCase , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase )
snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ) -> Optional[Any]:
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.inta )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Dict = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_lowerCAmelCase : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : int = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Dict = False
def snake_case ( self ):
"""simple docstring"""
snake_case = FlaxPegasusModelTester(self )
snake_case = ConfigTester(self , config_class=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
snake_case = model_class(lowerCAmelCase )
@jax.jit
def encode_jitted(lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ):
return model.encode(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase )
with self.subTest('JIT Enabled' ):
snake_case = encode_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
snake_case = encode_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case = model_class(lowerCAmelCase )
snake_case = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
snake_case = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
return model.decode(
decoder_input_ids=lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , encoder_outputs=lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
snake_case = decode_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
snake_case = decode_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case = model_class_name.from_pretrained('google/pegasus-large' , from_pt=lowerCAmelCase )
snake_case = np.ones((1, 1) )
snake_case = model(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
snake_case = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
snake_case = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
snake_case = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
snake_case = tokenizer(lowerCAmelCase , return_tensors='np' , truncation=lowerCAmelCase , max_length=5_12 , padding=lowerCAmelCase )
snake_case = model.generate(**lowerCAmelCase , num_beams=2 ).sequences
snake_case = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
assert tgt_text == decoded
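The `prepare_pegasus_inputs_dict` helper above derives attention masks purely from the pad token id. A tiny standalone illustration of that masking rule:

```python
import numpy as np

pad_token_id = 0
input_ids = np.array([[5, 7, 9, 0, 0], [3, 4, 0, 0, 0]])

# 1 for real tokens, 0 for padding -- the same rule the helper applies
attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
print(attention_mask)
# [[1 1 1 0 0]
#  [1 1 0 0 0]]
```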
| 352 | """simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig ( datasets.BuilderConfig ):
"""simple docstring"""
_lowerCAmelCase : Optional[datasets.Features] = None
def lowerCAmelCase__ ( _UpperCamelCase : "pyspark.sql.DataFrame" , _UpperCamelCase : List[int] , ) -> Dict:
"""simple docstring"""
import pyspark
def generate_fn():
snake_case = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
snake_case = df_with_partition_id.select('*' ).where(f"""part_id = {partition_id}""" ).drop('part_id' )
snake_case = partition_df.collect()
snake_case = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable ( _BaseExamplesIterable ):
"""simple docstring"""
    def __init__( self , df , partition_order=None , ):
        """simple docstring"""
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
"""simple docstring"""
yield from self.generate_examples_fn()
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=lowerCAmelCase )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.split_shard_indices_by_worker(lowerCAmelCase , lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=lowerCAmelCase )
@property
def snake_case ( self ):
"""simple docstring"""
return len(self.partition_order )
class lowerCAmelCase_ ( datasets.DatasetBuilder ):
"""simple docstring"""
_lowerCAmelCase : List[str] = SparkConfig
def __init__( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
"""simple docstring"""
import pyspark
snake_case = pyspark.sql.SparkSession.builder.getOrCreate()
snake_case = df
snake_case = working_dir
super().__init__(
cache_dir=lowerCAmelCase , config_name=str(self.df.semanticHash() ) , **lowerCAmelCase , )
    def _validate_cache_dir( self ):
"""simple docstring"""
def create_cache_and_write_probe(lowerCAmelCase ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowerCAmelCase )
snake_case = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowerCAmelCase , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
snake_case = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowerCAmelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def snake_case ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size ):
"""simple docstring"""
import pyspark
def get_arrow_batch_size(lowerCAmelCase ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
snake_case = self.df.count()
snake_case = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
snake_case = (
self.df.limit(lowerCAmelCase )
.repartition(1 )
.mapInArrow(lowerCAmelCase , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
snake_case = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
snake_case = min(lowerCAmelCase , int(approx_total_size / max_shard_size ) )
snake_case = self.df.repartition(lowerCAmelCase )
    def _prepare_split_single( self , fpath , file_format , max_shard_size , ):
"""simple docstring"""
import pyspark
snake_case = ParquetWriter if file_format == 'parquet' else ArrowWriter
snake_case = os.path.join(self._working_dir , os.path.basename(lowerCAmelCase ) ) if self._working_dir else fpath
snake_case = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
snake_case = self.config.features
snake_case = self._writer_batch_size
snake_case = self._fs.storage_options
def write_arrow(lowerCAmelCase ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
snake_case = pyspark.TaskContext().taskAttemptId()
snake_case = next(lowerCAmelCase , lowerCAmelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
snake_case = 0
snake_case = writer_class(
features=lowerCAmelCase , path=working_fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , writer_batch_size=lowerCAmelCase , storage_options=lowerCAmelCase , embed_local_files=lowerCAmelCase , )
snake_case = pa.Table.from_batches([first_batch] )
writer.write_table(lowerCAmelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
snake_case ,snake_case = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
snake_case = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , writer_batch_size=lowerCAmelCase , storage_options=lowerCAmelCase , embed_local_files=lowerCAmelCase , )
snake_case = pa.Table.from_batches([batch] )
writer.write_table(lowerCAmelCase )
if writer._num_bytes > 0:
snake_case ,snake_case = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCAmelCase ) ):
snake_case = os.path.join(os.path.dirname(lowerCAmelCase ) , os.path.basename(lowerCAmelCase ) )
shutil.move(lowerCAmelCase , lowerCAmelCase )
snake_case = (
self.df.mapInArrow(lowerCAmelCase , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def snake_case ( self , lowerCAmelCase , lowerCAmelCase = "arrow" , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
"""simple docstring"""
self._validate_cache_dir()
snake_case = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCAmelCase )
snake_case = not is_remote_filesystem(self._fs )
snake_case = os.path.join if is_local else posixpath.join
snake_case = '-TTTTT-SSSSS-of-NNNNN'
snake_case = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
snake_case = path_join(self._output_dir , lowerCAmelCase )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
            num_examples , num_bytes , num_shards , shard_lengths = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
snake_case = total_num_examples
snake_case = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
snake_case = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
snake_case = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
rename(
lowerCAmelCase , fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , fpath.replace('TTTTT-SSSSS' , F"""{global_shard_id:05d}""" ).replace('NNNNN' , F"""{total_shards:05d}""" ) , )
snake_case = []
snake_case = 0
for i in range(len(lowerCAmelCase ) ):
snake_case ,snake_case = task_id_and_num_shards[i]
for shard_id in range(lowerCAmelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCAmelCase , len(lowerCAmelCase ) ).map(lambda lowerCAmelCase : _rename_shard(*lowerCAmelCase ) ).collect()
else:
# don't use any pattern
snake_case = 0
snake_case = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , F"""{shard_id:05d}""" ).replace('TTTTT' , F"""{task_id:05d}""" ) , fpath.replace(lowerCAmelCase , '' ) , )
def snake_case ( self , lowerCAmelCase , ):
"""simple docstring"""
return SparkExamplesIterable(self.df )
| 149 | 0 |
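The final rename step above turns per-task `TTTTT-SSSSS` shard names into a global `SSSSS-of-NNNNN` numbering. A small sketch of that filename arithmetic; the path template mirrors the one built in `_prepare_split`, but the concrete values are illustrative.

```python
fpath = "spark-train-TTTTT-SSSSS-of-NNNNN.arrow"

def shard_names(task_id: int, shard_id: int, global_shard_id: int, total_shards: int) -> "tuple[str, str]":
    """Return (per-task temp name, final globally numbered name) for one shard."""
    temp = fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}")
    final = fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}")
    return temp, final

print(shard_names(task_id=3, shard_id=1, global_shard_id=7, total_shards=12))
# ('spark-train-00003-00001-of-NNNNN.arrow', 'spark-train-00007-of-00012.arrow')
```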
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS : Optional[List[str]] = None
_NATIVE_BYTEORDER = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: bool = True
__UpperCamelCase: Optional[str] = None
# Automatically constructed
__UpperCamelCase: ClassVar[str] = "PIL.Image.Image"
__UpperCamelCase: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
__UpperCamelCase: str = field(default="Image" , init=snake_case__ , repr=snake_case__ )
def __call__( self : Any ):
return self.pa_type
def _A ( self : Tuple , A : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(A , A ):
_UpperCAmelCase : Optional[int] = np.array(A )
if isinstance(A , A ):
return {"path": value, "bytes": None}
elif isinstance(A , A ):
return {"path": None, "bytes": value}
elif isinstance(A , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(A )
elif isinstance(A , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(A )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _A ( self : List[str] , A : dict , A : Optional[Any]=None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
_UpperCAmelCase : List[Any] = {}
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(A ):
_UpperCAmelCase : List[str] = PIL.Image.open(A )
else:
_UpperCAmelCase : int = path.split("::" )[-1]
try:
_UpperCAmelCase : Optional[Any] = string_to_dict(A , config.HUB_DATASETS_URL )["repo_id"]
_UpperCAmelCase : int = token_per_repo_id.get(A )
except ValueError:
_UpperCAmelCase : Optional[Any] = None
with xopen(A , "rb" , use_auth_token=A ) as f:
_UpperCAmelCase : str = BytesIO(f.read() )
_UpperCAmelCase : List[str] = PIL.Image.open(bytes_ )
else:
_UpperCAmelCase : Any = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def _A ( self : Optional[Any] ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def _A ( self : Optional[int] , A : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
_UpperCAmelCase : Any = pa.array([None] * len(A ) , type=pa.binary() )
_UpperCAmelCase : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_UpperCAmelCase : str = pa.array([None] * len(A ) , type=pa.string() )
_UpperCAmelCase : Union[str, Any] = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_UpperCAmelCase : Tuple = storage.field("bytes" )
else:
_UpperCAmelCase : str = pa.array([None] * len(A ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_UpperCAmelCase : Tuple = storage.field("path" )
else:
_UpperCAmelCase : Optional[Any] = pa.array([None] * len(A ) , type=pa.string() )
_UpperCAmelCase : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_UpperCAmelCase : List[Any] = pa.array(
[encode_np_array(np.array(A ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
_UpperCAmelCase : Union[str, Any] = pa.array([None] * len(A ) , type=pa.string() )
_UpperCAmelCase : Union[str, Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(A , self.pa_type )
def _A ( self : List[str] , A : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(A : Dict ):
with xopen(A , "rb" ) as f:
_UpperCAmelCase : List[str] = f.read()
return bytes_
_UpperCAmelCase : str = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_UpperCAmelCase : int = pa.array(
[os.path.basename(A ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
_UpperCAmelCase : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(A , self.pa_type )
def list_image_compression_formats( ) -> List[str]:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_UpperCAmelCase : Any = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def UpperCamelCase_ ( _UpperCAmelCase : "PIL.Image.Image" ) -> bytes:
"""simple docstring"""
_UpperCAmelCase : List[str] = BytesIO()
if image.format in list_image_compression_formats():
_UpperCAmelCase : Tuple = image.format
else:
_UpperCAmelCase : Optional[Any] = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCAmelCase , format=_UpperCAmelCase )
return buffer.getvalue()
def UpperCamelCase_ ( _UpperCAmelCase : "PIL.Image.Image" ) -> dict:
"""simple docstring"""
if hasattr(_UpperCAmelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCAmelCase )}
def encode_np_array( array: np.ndarray ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
_UpperCAmelCase : Union[str, Any] = array.dtype
_UpperCAmelCase : Tuple = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
_UpperCAmelCase : List[str] = dtype.kind
_UpperCAmelCase : Dict = dtype.itemsize
_UpperCAmelCase : Any = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_UpperCAmelCase : Dict = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_UpperCAmelCase : Optional[Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_UpperCAmelCase : Union[str, Any] = dtype_byteorder + dtype_kind + str(_UpperCAmelCase )
_UpperCAmelCase : List[Any] = np.dtype(_UpperCAmelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
_UpperCAmelCase : Optional[Any] = PIL.Image.fromarray(array.astype(_UpperCAmelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCAmelCase )}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of image-like objects (paths, arrays, or PIL Images) into image dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
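# Illustrative aside (my addition, not part of the original module): how `encode_np_array`
# treats dtypes. A uint8 RGB array is accepted as-is, while a 2-D int64 array is downcast
# within its kind ("i8" -> "i4") with a warning before Pillow encodes it.
#
#   import numpy as np
#   from datasets.features.image import encode_np_array
#
#   rgb = np.zeros((32, 32, 3), dtype=np.uint8)
#   assert encode_np_array(rgb)["path"] is None          # encoded in-memory as bytes
#   wide = np.arange(64, dtype=np.int64).reshape(8, 8)
#   assert encode_np_array(wide)["bytes"] is not None    # emits a downcasting warning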
| 31 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
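# Aside (my addition, for clarity on why every shape assertion above uses crop_size
# rather than size): with size={"shortest_edge": 30} and crop_pct=0.9, PoolFormer-style
# preprocessing first resizes the shortest edge to int(30 / 0.9) = 33 and then
# center-crops a 30x30 window.
#
#   shortest_edge, crop_pct = 30, 0.9
#   resize_to = int(shortest_edge / crop_pct)  # 33; then center-crop to 30x30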
| 277 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """
    Image classification pipeline using any `AutoModelForImageClassification`. This pipeline predicts the class of an
    image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
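# Usage sketch (my addition; the checkpoint name is illustrative): this class backs
# `pipeline("image-classification")`, so end-to-end use looks like:
#
#   from transformers import pipeline
#
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   preds = classifier("cat.jpg", top_k=3)
#   # -> [{"score": ..., "label": "tabby, tabby cat"}, ...] (labels and scores depend on the model)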
| 371 |
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: a grayscale PIL Image object. Every pixel above the global mean becomes 255,
    every other pixel becomes 0.
    """
    width, height = image.size
    mean = 0
    pixels = image.load()
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
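# Worked example (my addition): for a 2x2 grayscale image with pixel values
# [10, 200, 50, 120], the mean is 380 // 4 = 95, so thresholding yields [0, 255, 0, 255].
#
#   img = Image.new("L", (2, 2))
#   img.putdata([10, 200, 50, 120])
#   assert list(mean_threshold(img).getdata()) == [0, 255, 0, 255]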
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 184 | 0 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the shortest distance along the surface of an ellipsoid between two points
    on the surface of earth, given longitudes and latitudes in degrees.
    """
    # Equation parameter: flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
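# Usage sketch (my addition; coordinates are illustrative): San Francisco to New York.
# The result is in meters; it should land near the ~4,130 km great-circle figure,
# differing slightly from plain haversine because of the flattening correction.
#
#   distance = lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)
#   print(f"{distance / 1000:.0f} km")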
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
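# How this resolves at runtime (explanatory addition): replacing the module object in
# sys.modules with a _LazyModule defers the heavy torch/TF imports until an attribute
# listed in _import_structure is first accessed.
#
#   import transformers.models.xlm as xlm   # cheap: no modeling code imported yet
#   tokenizer_cls = xlm.XLMTokenizer        # first access triggers the real import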
| 251 | 0 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the number of set bits in a non-negative integer by clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the number of set bits in a non-negative integer by testing each bit in turn."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark code for comparing the two implementations on several inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"

        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")

        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
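# Quick sanity check (my addition): 25 = 0b11001 and 37 = 0b100101 each contain three
# set bits, and the two implementations must agree on every non-negative input.
#
#   assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
#   assert get_set_bits_count_using_modulo_operator(37) == 3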
| 276 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
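# Usage sketch (my addition): attribute_map plus the two properties let generic code read
# `num_attention_heads` and `hidden_size` from a config whose canonical fields are
# encoder_attention_heads and d_model.
#
#   config = PegasusConfig()
#   assert config.num_attention_heads == config.encoder_attention_heads == 16
#   assert config.hidden_size == config.d_model == 1024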
| 276 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
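# Example environment (my addition; values are illustrative) under which the check above
# returns True, provided the `smdistributed` package is importable:
#
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4, "pipeline": "interleaved"}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'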
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 19 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
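# Usage sketch (my addition): the ONNX config declares a single 4-D pixel_values input,
# so an export-time dummy input can be derived directly from it.
#
#   config = BeitConfig()
#   onnx_config = BeitOnnxConfig(config)
#   assert list(onnx_config.inputs) == ["pixel_values"]
#   assert onnx_config.atol_for_validation == 1e-4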
| 347 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
snake_case : Dict = '''<s>'''
snake_case : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(UpperCamelCase__ ) , 1054 )
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
snake_case : Any = MBartaaTokenizer(UpperCamelCase__ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=UpperCamelCase__ )
snake_case : Any = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
snake_case : Dict = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
snake_case : Tuple = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
snake_case : Any = {'''input_ids''': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case : Any = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
snake_case : Optional[int] = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
snake_case : Union[str, Any] = tempfile.mkdtemp()
snake_case : str = tokenizer_r.save_pretrained(UpperCamelCase__ )
snake_case : Tuple = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
snake_case : Optional[Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ )
# Checks everything loads correctly in the same way
snake_case : List[str] = tokenizer_r.from_pretrained(UpperCamelCase__ )
snake_case : Tuple = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase__ )
# Save tokenizer rust, legacy_format=True
snake_case : Union[str, Any] = tempfile.mkdtemp()
snake_case : Optional[Any] = tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ )
snake_case : str = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ )
# Checks everything loads correctly in the same way
snake_case : Optional[int] = tokenizer_r.from_pretrained(UpperCamelCase__ )
snake_case : Union[str, Any] = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
shutil.rmtree(UpperCamelCase__ )
# Save tokenizer rust, legacy_format=False
snake_case : Tuple = tempfile.mkdtemp()
snake_case : Union[str, Any] = tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ )
snake_case : Any = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase__ )
snake_case : Tuple = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
shutil.rmtree(UpperCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def lowerCAmelCase ( cls : Optional[int] ) -> Dict:
"""simple docstring"""
snake_case : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
snake_case : List[str] = 1
return cls
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_0038 )
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids )
snake_case : Optional[Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
snake_case : str = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
snake_case : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__ )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
snake_case : Tuple = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , UpperCamelCase__ )
snake_case : List[str] = 10
snake_case : str = self.tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ ).input_ids[0]
self.assertEqual(ids[0] , UpperCamelCase__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_0053, 25_0001] )
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Dict = tempfile.mkdtemp()
snake_case : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase__ )
snake_case : Optional[Any] = MBartaaTokenizer.from_pretrained(UpperCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase__ )
@require_torch
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
snake_case : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors='''pt''' )
snake_case : Union[str, Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
snake_case : Dict = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
snake_case : List[str] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
snake_case : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
snake_case : str = self.tokenizer(self.src_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=3 , return_tensors='''pt''' )
snake_case : int = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=10 , return_tensors='''pt''' )
snake_case : List[Any] = targets['''input_ids''']
snake_case : List[Any] = shift_tokens_right(UpperCamelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
snake_case : int = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_0004, 62, 3034, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_0001,
} , )
| 359 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 83 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_like(self):
        # TODO: add tests for the remaining tokenizer features
        pass
| 344 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__UpperCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_legacy_index_retriever()
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , __A )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowerCamelCase ( self : Optional[Any] ):
import torch
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
__UpperCamelCase = [[5, 7], [1_0, 1_1]]
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , np.ndarray )
__UpperCamelCase = retriever(
__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='pt' , )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        # check whether the retriever output consists of 6 attributes, including the tokenized docs
        self.assertEqual(len(out), 6)
        # check for the doc-token related keys in the output dictionary
        self.assertEqual(all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True)
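# A minimal standalone sketch (an illustration added here, not part of the original test) of why
# the expected doc ids above come out as [[1], [0]]: the all-ones query has its largest inner
# product with the positive document embedding, the all-minus-ones query with the negative one.
#
#     import numpy as np
#     doc_embeds = np.stack([-np.ones(4), np.ones(4)])  # doc 0 negative, doc 1 positive
#     queries = np.stack([np.ones(4), -np.ones(4)])     # mirrors `hidden_states` above
#     scores = queries @ doc_embeds.T                   # inner products, shape (2, 2)
#     print(scores.argmax(axis=1))                      # [1 0] -> doc_ids [[1], [0]]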
| 53 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER on the mapped dataset and write results (and optionally all outputs) to disk."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in a text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize the target text; adapt this to match the preprocessing used during training."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new line characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log results
    # do not change the function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to None (no chunking)."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks in seconds. Defaults to None."
    )
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. None (default) picks GPU 0 if available, else CPU; -1 forces CPU, 0 the first GPU and so on.",
    )
    args = parser.parse_args()
main(args)
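# A hedged usage sketch of this script; the script filename and the model/dataset identifiers
# below are placeholders for illustration, not values taken from the original file:
#
#     python eval.py \
#         --model_id facebook/wav2vec2-base-960h \
#         --dataset mozilla-foundation/common_voice_8_0 \
#         --config en \
#         --split test \
#         --log_outputs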
| 159 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager to hide the terminal cursor and restore it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
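# A small usage sketch (illustration only): wrap a long-running loop so the cursor is hidden
# while it runs and restored even if the body raises.
#
#     import time
#
#     with hide():
#         for _ in range(3):
#             time.sleep(0.1)  # work happens here without a blinking cursor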
| 159 | 1 |
"""simple docstring"""
def _A ( UpperCamelCase_ : Union[str, Any]) -> Dict:
'''simple docstring'''
__lowercase = []
__lowercase = []
__lowercase = {
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
__lowercase = len(UpperCamelCase_) if (len(UpperCamelCase_) > 7) else 7
# Print table header for output
print(
"Symbol".center(8), "Stack".center(UpperCamelCase_), "Postfix".center(UpperCamelCase_), sep=" | ", )
print("-" * (print_width * 3 + 7))
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(UpperCamelCase_) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(UpperCamelCase_) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop()) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(UpperCamelCase_) == 0:
stack.append(UpperCamelCase_) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(UpperCamelCase_) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop()) # pop stack & add to Postfix
stack.append(UpperCamelCase_) # push x to stack
print(
x.center(8), ("".join(UpperCamelCase_)).ljust(UpperCamelCase_), ("".join(UpperCamelCase_)).ljust(UpperCamelCase_), sep=" | ", ) # Output in tabular format
while len(UpperCamelCase_) > 0: # while stack is not empty
post_fix.append(stack.pop()) # pop stack & add to Postfix
print(
" ".center(8), ("".join(UpperCamelCase_)).ljust(UpperCamelCase_), ("".join(UpperCamelCase_)).ljust(UpperCamelCase_), sep=" | ", ) # Output in tabular format
return "".join(UpperCamelCase_) # return Postfix as str
def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on the reversed infix, return the reverse of its Postfix
if __name__ == "__main__":
_a = input('\nEnter an Infix Equation = ') # Input an Infix equation
_a = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
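# A short worked trace (added illustration): for the input "a+b*c" the loop above produces
#     a -> postfix "a";  + -> stack "+";  b -> postfix "ab"
#     * -> stack "+*" (higher priority than "+", so it is pushed);  c -> postfix "abc"
#     drain stack -> postfix "abc*+"
# so infix_2_postfix("a+b*c") returns "abc*+" and infix_2_prefix("a+b*c") returns "+a*bc".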
| 17 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
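# A worked trace (added illustration) for main()'s input, scores = [90, 23, 6, 33, 21, 65, 123,
# 34423] with height = log2(8) = 3; levels alternate max / min / max over the leaves:
#     depth 2 (max): max(90, 23)=90, max(6, 33)=33, max(21, 65)=65, max(123, 34423)=34423
#     depth 1 (min): min(90, 33)=33, min(65, 34423)=65
#     depth 0 (max): max(33, 65)=65
# so minimax(0, 0, True, scores, height) prints 65 as the optimal value.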
| 56 | 0 |
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
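# The functions above sum the Maclaurin expansions (stated here as a reminder):
#     sin(theta) = sum_{r>=0} (-1)^r * theta^(2r+1) / (2r+1)!
#     cos(theta) = sum_{r>=0} (-1)^r * theta^(2r)   / (2r)!
# after reducing theta modulo 2*pi so the series converges quickly. A hedged sanity check:
#
#     import math
#     assert abs(maclaurin_sin(1.0) - math.sin(1.0)) < 1e-9
#     assert abs(maclaurin_cos(1.0) - math.cos(1.0)) < 1e-9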
| 362 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F'''{solution() = }''')
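# My reading of the DP above (treat it as an assumption): different_colour_ways_number[n][t - 2]
# counts the ways to place at least one tile of length t (2 = red, 3 = green, 4 = blue) in a row
# of n units; each start position contributes the count for the remaining right-hand segment plus
# one for placing nothing after it. On the Project Euler 116 example of a 5-unit row
# (7 red + 3 green + 2 blue arrangements):
#
#     assert solution(5) == 12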
| 79 | 0 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
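# The update above is the standard secant iteration (restated as an equation):
#     x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n))
# For f(x) = x^3 - 2x - 5 the real root lies near 2.0945515, so a hedged check would be:
#
#     assert abs(intersection(f, 3, 3.5) - 2.0945515) < 1e-4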
| 182 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
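# Why stepping by 6 in is_prime suffices (a short justification, not in the original): every
# prime p > 3 satisfies p % 6 in {1, 5}, since the other residues are divisible by 2 or 3.
# Hedged sanity checks:
#
#     assert [n for n in range(5, 30) if is_prime(n)] == [5, 7, 11, 13, 17, 19, 23, 29]
#     assert solution(6) == 13  # the 6th prime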
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri,
            base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 129 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
benchmark()
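# A tiny hedged example of the two implementations agreeing (illustrative values):
#
#     assert euclidean_distance([0, 0], [3, 4]) == 5.0
#     assert euclidean_distance_no_np([0, 0], [3, 4]) == 5.0
#
# benchmark() above measures the per-call overhead of the numpy version on tiny vectors; numpy
# only wins once the vectors are large enough for vectorisation to pay off.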
| 129 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
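# A hedged usage sketch (standard PretrainedConfig behaviour; the values are made up):
#
#     config = BitConfig(layer_type="bottleneck", global_padding="same", out_features=["stage2"])
#     assert config.global_padding == "SAME"   # normalised by the check in __init__
#     BitConfig(layer_type="invalid")          # raises ValueError: not one of layer_types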
| 104 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
        to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
        to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
        to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 104 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
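# A hedged usage sketch of the decorator (the training function and its arguments are made up):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size, model):   # batch_size must be the first parameter; it is injected
#         ...                         # an OOM-style RuntimeError here makes the loop halve it
#
#     train(model)                    # note: the caller does NOT pass batch_size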
| 197 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 197 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 341 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it to a dict of lists
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
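# A hedged end-to-end sketch of `pad` on ragged inputs (the extractor instance, feature name and
# sizes below are made up for illustration, assuming feature_size == 1):
#
#     feats = {"input_values": [np.ones(3), np.ones(5)]}
#     batch = extractor.pad(feats, padding="longest", return_attention_mask=True)
#     batch["input_values"].shape   # (2, 5): the short example padded with `padding_value`
#     batch["attention_mask"][0]    # [1, 1, 1, 0, 0]: padded positions are masked out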
| 68 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 172 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
def __snake_case ( self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any ):
'''simple docstring'''
lowercase :List[str] = inputs.pop('''labels''' )
lowercase , lowercase :Union[str, Any] = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
return loss
def __snake_case ( self : List[str] , snake_case__ : nn.Module , snake_case__ : Dict[str, Union[torch.Tensor, Any]] , snake_case__ : bool , snake_case__ : Optional[List[str]] = None , ):
'''simple docstring'''
lowercase :List[str] = self._prepare_inputs(snake_case__ )
lowercase :Optional[Any] = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowercase :Optional[Any] = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **snake_case__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowercase :int = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs['''max_length'''] )
lowercase :Any = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
lowercase , lowercase :List[str] = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
lowercase :List[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowercase :Any = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowercase :Tuple = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def __snake_case ( self : int , snake_case__ : List[Any] , snake_case__ : Any ):
'''simple docstring'''
lowercase :Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f""" padded to `max_length`={max_length}""" )
lowercase :Optional[Any] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
lowercase :Any = tensor
return padded_tensor
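The `label_smoothed_nll_loss` helper that the trainer above imports from its local `utils` module is called as `loss_fn(log_probs, labels, epsilon, ignore_index=pad_token_id)` and returns a `(loss, nll_loss)` pair. A minimal PyTorch sketch consistent with that call site, assuming the standard label-smoothing formulation rather than reproducing the exact upstream helper:

```python
import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None):
    """Label-smoothed NLL over log-probabilities; returns (smoothed_loss, nll_loss)."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)   # log-prob of the gold token
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)   # uniform smoothing term
    if ignore_index is not None:                      # zero out padded positions
        pad_mask = target.eq(ignore_index)
        nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
        smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss

lprobs = torch.log_softmax(torch.randn(2, 4, 8), dim=-1)  # (batch, seq, vocab)
labels = torch.randint(0, 8, (2, 4))
loss, nll = label_smoothed_nll_loss(lprobs, labels, epsilon=0.1, ignore_index=0)
```

Note that `target` must hold valid vocabulary ids when it reaches `gather`; the trainer passes `pad_token_id` as `ignore_index`, so padded positions index correctly and are then masked out.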
| 172 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a__ : Union[str, Any] =logging.get_logger(__name__)
class snake_case ( GLPNImageProcessor ):
    """simple docstring"""
    def __init__( self , *__A , **__a ):
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.' , FutureWarning , )
        super().__init__(*__A , **__a )
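The row above is the standard deprecation-shim pattern: the old class subclasses its replacement, emits a `FutureWarning` once on construction, and otherwise behaves identically. A self-contained sketch with hypothetical class names:

```python
import warnings

class NewImageProcessor:
    def __init__(self, do_resize=True):
        self.do_resize = do_resize

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias kept only for backward compatibility."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor()  # warns, then acts exactly like the new class
assert extractor.do_resize
```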
| 53 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
A__ = logging.get_logger(__name__)
class __lowerCAmelCase ( FeatureExtractionMixin ):
def __init__( self , **_snake_case ):
"""simple docstring"""
requires_backends(self , ["""bs4"""] )
super().__init__(**_snake_case )
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_lowerCAmelCase = parent.find_all(child.name , recursive=_snake_case )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) )
_lowerCAmelCase = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = BeautifulSoup(_snake_case , """html.parser""" )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for element in html_code.descendants:
if type(_snake_case ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
_lowerCAmelCase = html.unescape(_snake_case ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_snake_case )
_lowerCAmelCase , _lowerCAmelCase = self.xpath_soup(_snake_case )
stringaxtag_seq.append(_snake_case )
stringaxsubs_seq.append(_snake_case )
if len(_snake_case ) != len(_snake_case ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(_snake_case ) != len(_snake_case ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = """"""
for tagname, subs in zip(_snake_case , _snake_case ):
xpath += F'/{tagname}'
if subs != 0:
xpath += F'[{subs}]'
return xpath
def __call__( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = False
# Check that strings has a valid type
if isinstance(_snake_case , _snake_case ):
_lowerCAmelCase = True
elif isinstance(_snake_case , (list, tuple) ):
if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ):
_lowerCAmelCase = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
F'but is of type {type(_snake_case )}.' )
_lowerCAmelCase = bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) )
if not is_batched:
_lowerCAmelCase = [html_strings]
# Get nodes + xpaths
_lowerCAmelCase = []
_lowerCAmelCase = []
for html_string in html_strings:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.get_three_from_single(_snake_case )
nodes.append(_snake_case )
_lowerCAmelCase = []
for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ):
_lowerCAmelCase = self.construct_xpath(_snake_case , _snake_case )
xpath_strings.append(_snake_case )
xpaths.append(_snake_case )
# return as Dict
_lowerCAmelCase = {"""nodes""": nodes, """xpaths""": xpaths}
_lowerCAmelCase = BatchFeature(data=_snake_case , tensor_type=_snake_case )
return encoded_inputs
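The feature extractor above walks every text node of the parsed HTML and rebuilds an XPath by climbing `element.parents`, attaching a 1-based subscript only when a tag has same-named siblings (subscript 0 means "only child, no subscript"). A condensed, runnable sketch of that logic; the function name is mine:

```python
from bs4 import BeautifulSoup  # pip install beautifulsoup4

def xpath_of(element):
    """Absolute XPath of a bs4 node, mirroring the xpath_soup logic above."""
    parts = []
    child = element if element.name else element.parent  # strings -> owning tag
    for parent in child.parents:
        siblings = parent.find_all(child.name, recursive=False)
        if len(siblings) == 1:
            parts.append(child.name)  # only child: no subscript
        else:
            pos = next(i for i, s in enumerate(siblings, 1) if s is child)
            parts.append(f"{child.name}[{pos}]")
        child = parent
    return "/" + "/".join(reversed(parts))

soup = BeautifulSoup("<html><body><p>a</p><p>b</p></body></html>", "html.parser")
print(xpath_of(soup.find_all("p")[1]))  # -> /html/body/p[2]
```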
| 82 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> str:
# A mock response for an HTTP head request to emulate server down
__UpperCamelCase :Optional[Any] = mock.Mock()
__UpperCamelCase :int = 500
__UpperCamelCase :List[Any] = {}
__UpperCamelCase :List[str] = HTTPError
__UpperCamelCase :List[str] = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase :str = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__lowercase) as mock_head:
__UpperCamelCase :Optional[int] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def UpperCamelCase__ ( self) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__UpperCamelCase :List[Any] = mock.Mock()
__UpperCamelCase :List[Any] = 500
__UpperCamelCase :int = {}
__UpperCamelCase :List[Any] = HTTPError
__UpperCamelCase :Union[str, Any] = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase :Optional[int] = GPTaTokenizerFast.from_pretrained('''gpt2''')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__lowercase) as mock_head:
__UpperCamelCase :Optional[int] = GPTaTokenizerFast.from_pretrained('''gpt2''')
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
try:
__UpperCamelCase :Any = tempfile.mktemp()
with open(__lowercase , '''wb''') as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , __lowercase)
__UpperCamelCase :Dict = AlbertTokenizer.from_pretrained(__lowercase)
finally:
os.remove(__lowercase)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json'''):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''') as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , __lowercase)
__UpperCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''')
def UpperCamelCase__ ( self) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
__UpperCamelCase :Any = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''')
@is_staging_test
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
a__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def UpperCamelCase__ ( cls) -> Dict:
__UpperCamelCase :Union[str, Any] = TOKEN
HfFolder.save_token(__lowercase)
@classmethod
def UpperCamelCase__ ( cls) -> str:
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''')
except HTTPError:
pass
def UpperCamelCase__ ( self) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase :List[Any] = os.path.join(__lowercase , '''vocab.txt''')
with open(__lowercase , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__UpperCamelCase :Optional[Any] = BertTokenizer(__lowercase)
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token)
__UpperCamelCase :Dict = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""")
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase , repo_id='''test-tokenizer''' , push_to_hub=__lowercase , use_auth_token=self._token)
__UpperCamelCase :List[str] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""")
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
def UpperCamelCase__ ( self) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase :Optional[Any] = os.path.join(__lowercase , '''vocab.txt''')
with open(__lowercase , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__UpperCamelCase :int = BertTokenizer(__lowercase)
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token)
__UpperCamelCase :Optional[int] = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__lowercase , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=__lowercase , use_auth_token=self._token)
__UpperCamelCase :List[str] = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
@require_tokenizers
def UpperCamelCase__ ( self) -> List[Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase :Optional[int] = os.path.join(__lowercase , '''vocab.txt''')
with open(__lowercase , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__UpperCamelCase :Optional[Any] = CustomTokenizer(__lowercase)
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token)
__UpperCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=__lowercase)
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''')
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase :int = os.path.join(__lowercase , '''vocab.txt''')
with open(__lowercase , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__UpperCamelCase :Tuple = BertTokenizerFast.from_pretrained(__lowercase)
bert_tokenizer.save_pretrained(__lowercase)
__UpperCamelCase :Optional[int] = CustomTokenizerFast.from_pretrained(__lowercase)
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token)
__UpperCamelCase :Any = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=__lowercase)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''')
__UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained(
f"""{USER}/test-dynamic-tokenizer""" , use_fast=__lowercase , trust_remote_code=__lowercase)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''')
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Dict = Trie()
trie.add('''Hello 友達''')
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}})
trie.add('''Hello''')
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}})
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Any = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''') , ['''[CLS] This is a extra_id_100'''])
trie.add('''[CLS]''')
trie.add('''extra_id_1''')
trie.add('''extra_id_100''')
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''') , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''])
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :str = Trie()
trie.add('''A''')
self.assertEqual(trie.split('''ABC''') , ['''A''', '''BC'''])
self.assertEqual(trie.split('''BCA''') , ['''BC''', '''A'''])
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Union[str, Any] = Trie()
trie.add('''TOKEN]''')
trie.add('''[SPECIAL_TOKEN]''')
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''') , ['''This is something ''', '''[SPECIAL_TOKEN]'''])
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Optional[Any] = Trie()
trie.add('''A''')
trie.add('''P''')
trie.add('''[SPECIAL_TOKEN]''')
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''') , ['''This is something ''', '''[SPECIAL_TOKEN]'''])
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Tuple = Trie()
trie.add('''AB''')
trie.add('''B''')
trie.add('''C''')
self.assertEqual(trie.split('''ABC''') , ['''AB''', '''C'''])
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :str = Trie()
trie.add('''ABC''')
trie.add('''B''')
trie.add('''CD''')
self.assertEqual(trie.split('''ABCD''') , ['''ABC''', '''D'''])
def UpperCamelCase__ ( self) -> int:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
__UpperCamelCase :Dict = Trie()
__UpperCamelCase :Optional[int] = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3])
self.assertEqual(__lowercase , ['''AB''', '''C'''])
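The `Trie` tests above pin down two behaviors: `add` builds nested dicts terminated by an `''` key, and `split` cuts on the longest matching token while leaving unmatched spans intact. A greedy longest-match sketch that reproduces the asserted cases; the real `transformers.tokenization_utils.Trie` is more elaborate:

```python
class SimpleTrie:
    """Character trie with an '' end-of-word marker, as in the tests above."""

    def __init__(self):
        self.data = {}

    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1

    def split(self, text):
        """Greedy longest-match split; uncovered characters stay grouped."""
        out, start, i = [], 0, 0
        while i < len(text):
            node, j, end = self.data, i, None
            while j < len(text) and text[j] in node:  # walk as deep as possible
                node = node[text[j]]
                j += 1
                if "" in node:
                    end = j                           # longest full token so far
            if end is None:
                i += 1                                # no token starts here
            else:
                if start < i:
                    out.append(text[start:i])         # flush unmatched prefix
                out.append(text[i:end])
                start = i = end
        if start < len(text):
            out.append(text[start:])
        return out

trie = SimpleTrie()
trie.add("[CLS]"); trie.add("extra_id_1"); trie.add("extra_id_100")
assert trie.split("[CLS] This is a extra_id_100") == ["[CLS]", " This is a ", "extra_id_100"]
```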
| 105 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = '''▁'''
__lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class lowerCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
a__ : int = BertGenerationTokenizer
a__ : Dict = False
a__ : str = True
def UpperCamelCase__ ( self) -> List[str]:
super().setUp()
__UpperCamelCase :Optional[Any] = BertGenerationTokenizer(__lowercase , keep_accents=__lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Dict = '''<s>'''
__UpperCamelCase :Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase) , __lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase) , __lowercase)
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''<pad>''')
self.assertEqual(len(__lowercase) , 1_002)
def UpperCamelCase__ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1_000)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :str = BertGenerationTokenizer(__lowercase , keep_accents=__lowercase)
__UpperCamelCase :Optional[Any] = tokenizer.tokenize('''This is a test''')
self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase) , [285, 46, 10, 170, 382] , )
__UpperCamelCase :List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__UpperCamelCase :Any = tokenizer.convert_tokens_to_ids(__lowercase)
self.assertListEqual(
__lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__UpperCamelCase :Tuple = tokenizer.convert_ids_to_tokens(__lowercase)
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase__ ( self) -> int:
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''')
@slow
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[Any] = '''Hello World!'''
__UpperCamelCase :Optional[int] = [18_536, 2_260, 101]
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase))
@slow
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
__UpperCamelCase :Union[str, Any] = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase))
@require_torch
@slow
def UpperCamelCase__ ( self) -> Optional[int]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__UpperCamelCase :Optional[Any] = list(self.big_tokenizer.get_vocab().keys())[:10]
__UpperCamelCase :Optional[int] = ''' '''.join(__lowercase)
__UpperCamelCase :Optional[int] = self.big_tokenizer.encode_plus(__lowercase , return_tensors='''pt''' , return_token_type_ids=__lowercase)
__UpperCamelCase :List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__lowercase)
__UpperCamelCase :List[Any] = BertGenerationConfig()
__UpperCamelCase :Optional[Any] = BertGenerationEncoder(__lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowercase)
model(**__lowercase)
@slow
def UpperCamelCase__ ( self) -> Dict:
# fmt: off
__UpperCamelCase :List[str] = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
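A detail the expected pieces above rely on: SentencePiece never stores spaces, it marks word boundaries with U+2581 ('▁') instead, so pieces rejoin losslessly. A tiny sketch of that round-trip; the helper name is mine:

```python
SPIECE_UNDERLINE = "\u2581"  # '▁', SentencePiece's word-boundary marker

def pieces_to_text(pieces):
    """Rejoin SentencePiece-style pieces into plain text."""
    return "".join(pieces).replace(SPIECE_UNDERLINE, " ").strip()

assert pieces_to_text(["\u2581This", "\u2581is", "\u2581a", "\u2581t", "est"]) == "This is a test"
```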
| 105 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( input_num ) -> int:
    if not isinstance(input_num , int ):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
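The loop above tries every candidate up to `n // 2`, which is O(n). Pairing each divisor `d <= sqrt(n)` with its cofactor `n // d` yields the same sum in O(sqrt(n)); a sketch under a name of my choosing:

```python
import math

def sum_of_proper_divisors(n: int) -> int:
    """Sum of the divisors of n excluding n itself, via sqrt pairing."""
    if not isinstance(n, int):
        raise ValueError("Input must be an integer")
    if n <= 0:
        raise ValueError("Input must be positive")
    total = 0
    for d in range(1, math.isqrt(n) + 1):
        if n % d:
            continue
        for divisor in {d, n // d}:  # set literal avoids double-counting d == n // d
            if divisor != n:
                total += divisor
    return total

assert sum_of_proper_divisors(28) == 28  # 1 + 2 + 4 + 7 + 14; 28 is perfect
```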
| 177 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( PreTrainedTokenizerFast ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Any = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
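Stripped of the tokenizer machinery, the sequence layout implemented above is just prefix + A (+ B) + suffix, and `set_src_lang_special_tokens` shows mBART's source-side choice: an empty prefix and an `[eos, lang_code]` suffix. A sketch with hypothetical ids:

```python
def build_inputs(ids_a, ids_b=None, prefix=(), suffix=()):
    """prefix + A + suffix for single sequences; prefix + A + B + suffix for pairs."""
    if ids_b is None:
        return list(prefix) + list(ids_a) + list(suffix)
    return list(prefix) + list(ids_a) + list(ids_b) + list(suffix)

EOS_ID, EN_XX_ID = 2, 250004  # hypothetical ids, for illustration only
assert build_inputs([64, 65], suffix=(EOS_ID, EN_XX_ID)) == [64, 65, 2, 250004]
```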
| 11 | 0 |
"""simple docstring"""
def lowercase__(sentence , ngram_size ) ->list:
    """simple docstring"""
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
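For reference, the same sliding-window logic under a descriptive name, plus the edge case worth remembering:

```python
def char_ngrams(sentence, ngram_size):
    """Character n-grams of `sentence`; identical logic to the function above."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]

assert char_ngrams("abcde", 3) == ["abc", "bcd", "cde"]
assert char_ngrams("ab", 5) == []  # a window longer than the input yields nothing
```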
| 352 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def lowercase__(A="ro" , A="en" , A="wmt16" , A=None ) ->None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
lowercase__ : int= f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
lowercase__ : List[Any]= datasets.load_dataset(A , A )
if save_dir is None:
lowercase__ : Union[str, Any]= f'''{dataset}-{pair}'''
lowercase__ : str= Path(A )
save_dir.mkdir(exist_ok=A )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
lowercase__ : Any= "val" if split == "validation" else split
lowercase__ : List[Any]= save_dir.joinpath(f'''{fn}.source''' )
lowercase__ : Optional[Any]= save_dir.joinpath(f'''{fn}.target''' )
lowercase__ : Optional[int]= src_path.open("w+" )
lowercase__ : Any= tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase__ : int= x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
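One design nit in the script above: the `.source`/`.target` handles are opened with `open("w+")` and never closed, so flushing is left to interpreter exit. A sketch of the same per-split write using context managers; the function name is mine:

```python
from pathlib import Path

def write_split(records, save_dir, split, src_lang="ro", tgt_lang="en"):
    """Write {split}.source / {split}.target pairs, closing files deterministically."""
    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    src_path = save_dir / f"{split}.source"
    tgt_path = save_dir / f"{split}.target"
    with src_path.open("w") as src_fp, tgt_path.open("w") as tgt_fp:
        for ex in records:
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

write_split([{"ro": "Salut", "en": "Hello"}], "wmt16-ro-en", "val")
```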
| 150 | 0 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
def __init__( self : int , _A : str , _A : Optional[Any]=13 , _A : Tuple=7 , _A : Dict=True , _A : Any=True , _A : str=False , _A : str=True , _A : List[str]=99 , _A : Tuple=32 , _A : str=5 , _A : List[Any]=4 , _A : str=37 , _A : Tuple="gelu" , _A : List[str]=0.1 , _A : Any=0.1 , _A : str=512 , _A : List[str]=16 , _A : Union[str, Any]=2 , _A : Any=0.0_2 , _A : Any=3 , _A : Dict=4 , _A : Optional[Any]=None , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Dict = is_training
UpperCAmelCase__ : Union[str, Any] = use_input_mask
UpperCAmelCase__ : Dict = use_token_type_ids
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Dict = hidden_dropout_prob
UpperCAmelCase__ : Dict = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = max_position_embeddings
UpperCAmelCase__ : Optional[int] = type_vocab_size
UpperCAmelCase__ : str = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : List[Any] = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : int = scope
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : List[Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : str ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : int , _A : Optional[Any] , _A : List[Any] , _A : Any , _A : Optional[int] , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = BioGptModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Tuple = model(_A , attention_mask=_A )
UpperCAmelCase__ : str = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : List[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : int , _A : Optional[Any] , _A : List[Any] , _A : int , _A : List[str] , _A : str , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = BioGptForCausalLM(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : str = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : Tuple , _A : Any , _A : List[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[Any] , *_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = BioGptModel(config=_A )
model.to(_A )
model.eval()
# create attention mask
UpperCAmelCase__ : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=_A )
UpperCAmelCase__ : str = self.seq_length // 2
UpperCAmelCase__ : Union[str, Any] = 0
# first forward pass
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = model(_A , attention_mask=_A ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
UpperCAmelCase__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCAmelCase__ : List[Any] = ids_tensor((1,) , _A ).item() + 1
UpperCAmelCase__ : str = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCAmelCase__ : str = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCAmelCase__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_A )] , dim=1 , )
# get two different outputs
UpperCAmelCase__ : Any = model(_A , attention_mask=_A )['''last_hidden_state''']
UpperCAmelCase__ : Optional[int] = model(_A , past_key_values=_A , attention_mask=_A )['''last_hidden_state''']
# select random slice
UpperCAmelCase__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ : Any = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase__ : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
def lowercase_ ( self : Tuple , _A : Any , _A : List[Any] , _A : List[Any] , _A : Any , _A : str , *_A : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = BioGptModel(config=_A ).to(_A ).eval()
UpperCAmelCase__ : str = torch.ones(input_ids.shape , dtype=torch.long , device=_A )
# first forward pass
UpperCAmelCase__ : List[str] = model(_A , attention_mask=_A , use_cache=_A )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ : Dict = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attn_mask
UpperCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCAmelCase__ : Dict = model(_A , attention_mask=_A )['''last_hidden_state''']
UpperCAmelCase__ : Dict = model(_A , attention_mask=_A , past_key_values=_A )[
'''last_hidden_state'''
]
# select random slice
UpperCAmelCase__ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase__ : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
def lowercase_ ( self : Any , _A : List[Any] , _A : Optional[Any] , _A : str , _A : Union[str, Any] , _A : Optional[int] , *_A : List[Any] , _A : str=False ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BioGptForCausalLM(_A )
model.to(_A )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCAmelCase__ : Tuple = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def lowercase_ ( self : Union[str, Any] , _A : int , *_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = BioGptModel(_A )
UpperCAmelCase__ : Any = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : List[str] , *_A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : str = BioGptForTokenClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase__ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = BioGptModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Any = type
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_A , gradient_checkpointing=_A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_A )
@slow
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_A )
UpperCAmelCase__ : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase__ : str = '''left'''
# Define PAD Token = EOS Token = 50256
UpperCAmelCase__ : Tuple = tokenizer.eos_token
UpperCAmelCase__ : str = model.config.eos_token_id
# use different length sentences to test batching
UpperCAmelCase__ : List[str] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
UpperCAmelCase__ : List[Any] = tokenizer(_A , return_tensors='''pt''' , padding=_A )
UpperCAmelCase__ : Dict = inputs['''input_ids'''].to(_A )
UpperCAmelCase__ : List[str] = model.generate(
input_ids=_A , attention_mask=inputs['''attention_mask'''].to(_A ) , )
UpperCAmelCase__ : List[str] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_A )
UpperCAmelCase__ : List[str] = model.generate(input_ids=_A )
UpperCAmelCase__ : List[Any] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
UpperCAmelCase__ : Optional[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_A )
UpperCAmelCase__ : Optional[Any] = model.generate(input_ids=_A , max_length=model.config.max_length - num_paddings )
UpperCAmelCase__ : List[Any] = tokenizer.batch_decode(_A , skip_special_tokens=_A )
UpperCAmelCase__ : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_A )
UpperCAmelCase__ : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=_A )
UpperCAmelCase__ : List[str] = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , [non_padded_sentence, padded_sentence] )
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : str = BioGptModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Dict = 3
UpperCAmelCase__ : Union[str, Any] = input_dict['''input_ids''']
UpperCAmelCase__ : Dict = input_ids.ne(1 ).to(_A )
UpperCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase__ : Optional[Any] = BioGptForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : int = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[Any] = 3
UpperCAmelCase__ : Dict = '''multi_label_classification'''
UpperCAmelCase__ : str = input_dict['''input_ids''']
UpperCAmelCase__ : int = input_ids.ne(1 ).to(_A )
UpperCAmelCase__ : Union[str, Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase__ : List[Any] = BioGptForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : int = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([[2, 4_805, 9, 656, 21]] )
UpperCAmelCase__ : List[Any] = model(_A )[0]
UpperCAmelCase__ : Tuple = 42_384
UpperCAmelCase__ : Tuple = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _A )
UpperCAmelCase__ : int = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase__ : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_A )
torch.manual_seed(0 )
UpperCAmelCase__ : Dict = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_A )
UpperCAmelCase__ : List[str] = model.generate(
**_A , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=_A , )
UpperCAmelCase__ : str = tokenizer.decode(output_ids[0] , skip_special_tokens=_A )
UpperCAmelCase__ : Optional[Any] = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_A , _A )
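The batched-generation test above sets the tokenizer's padding side to "left" and reuses EOS as the pad token, because a decoder-only model appends new tokens right after the last input position: with right padding the continuation would follow a run of pad tokens instead of the prompt. A pure-Python sketch of left padding and its attention mask; the helper name is mine:

```python
def left_pad(batch, pad_id):
    """Left-pad variable-length id sequences and build the matching attention mask."""
    width = max(len(seq) for seq in batch)
    ids = [[pad_id] * (width - len(seq)) + list(seq) for seq in batch]
    mask = [[0] * (width - len(seq)) + [1] * len(seq) for seq in batch]
    return ids, mask

ids, mask = left_pad([[5, 6, 7], [8]], pad_id=0)
assert ids == [[5, 6, 7], [0, 0, 8]]
assert mask == [[1, 1, 1], [0, 0, 1]]
```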
| 181 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
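`_LazyModule` above defers the heavy framework imports until a symbol is first accessed, which keeps `import transformers`-style imports cheap when only a config class is needed. The real implementation subclasses `types.ModuleType` and installs itself in `sys.modules`; a much-simplified sketch of the idea:

```python
import importlib

class LazyStub:
    """Resolve a symbol's backing module only on first attribute access."""

    def __init__(self, import_structure):
        self._structure = import_structure  # {module_name: [exported symbols]}

    def __getattr__(self, attr):
        for module_name, symbols in self._structure.items():
            if attr in symbols:
                module = importlib.import_module(module_name)  # imported lazily
                return getattr(module, attr)
        raise AttributeError(attr)

lazy = LazyStub({"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # the json module is imported only here
```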
| 181 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ ( resistors : list[float] ):
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def lowerCamelCase__ ( resistors : list[float] ):
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg )
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
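    # Hedged usage examples (added for illustration): two 4-ohm resistors give
    # 2 ohms in parallel and 8 ohms in series.
    print(resistor_parallel([4.0, 4.0]))  # 2.0
    print(resistor_series([4.0, 4.0]))  # 8.0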
| 242 |
'''simple docstring'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its representation in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
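    # Hedged usage examples (added for illustration): 255 in hexadecimal and
    # 10 in binary.
    print(decimal_to_any(255, 16))  # FF
    print(decimal_to_any(10, 2))  # 1010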
| 242 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    """Configuration class for Marian machine-translation models."""

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for Marian models."""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
@property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported ONNX model."""
        return 1e-4
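# Hedged usage sketch (added for illustration; relies only on the class
# definitions above). A small config whose aliased attributes resolve through
# `attribute_map`:
#
#   config = MarianConfig(encoder_layers=2, decoder_layers=2, d_model=256)
#   assert config.hidden_size == 256          # alias of d_model
#   assert config.num_attention_heads == 16   # alias of encoder_attention_heads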
| 186 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->List[Any]:
'''simple docstring'''
A_ : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Union[str, Any] = None
A_ : Any = 20
A_ : Any = self._get_uniform_logits(batch_size=2 , length=_SCREAMING_SNAKE_CASE )
# tweak scores to not be uniform anymore
A_ : Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
A_ : Tuple = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
A_ : List[str] = jax.nn.softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
A_ : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
A_ : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
A_ : List[Any] = jax.nn.softmax(temp_dist_warper_smoother(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Any = None
A_ : List[Any] = 10
A_ : str = 2
# create ramp distribution
A_ : Any = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy()
A_ : List[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
A_ : Any = FlaxTopKLogitsWarper(3 )
A_ : Tuple = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
A_ : Optional[int] = 5
A_ : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
A_ : Optional[Any] = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy()
A_ : Dict = top_k_warp_safety_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : str = None
A_ : Optional[Any] = 10
A_ : Any = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
A_ : Optional[int] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
A_ : str = FlaxTopPLogitsWarper(0.8 )
A_ : Optional[int] = np.exp(top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
A_ : Tuple = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# check edge cases with negative and extreme logits
A_ : Union[str, Any] = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
A_ : str = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
A_ : str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
A_ : str = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : str = 20
A_ : Union[str, Any] = 4
A_ : Optional[Any] = 0
A_ : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that min length is applied at length 5
A_ : int = ids_tensor((batch_size, 20) , vocab_size=20 )
A_ : List[Any] = 5
A_ : Optional[int] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Optional[int] = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
A_ : Tuple = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Any = 15
A_ : int = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Optional[int] = 20
A_ : Optional[int] = 4
A_ : Optional[int] = 0
A_ : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the bos_token_id score
A_ : Optional[Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
A_ : str = 1
A_ : List[str] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[str] = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
A_ : Optional[int] = 3
A_ : List[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Tuple = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : Union[str, Any] = 20
A_ : str = 4
A_ : Dict = 0
A_ : Optional[int] = 5
A_ : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the eos_token_id when max_length is reached
A_ : List[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
A_ : Any = 4
A_ : Optional[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[Any] = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
A_ : int = 3
A_ : Union[str, Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Dict = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self )->str:
'''simple docstring'''
A_ : str = 4
A_ : Dict = 10
A_ : Union[str, Any] = 15
A_ : str = 2
A_ : int = 1
A_ : List[str] = 15
# dummy input_ids and scores
A_ : Tuple = ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
A_ : int = input_ids.copy()
A_ : List[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = scores.copy()
# instantiate all dist processors
A_ : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Any = FlaxTopKLogitsWarper(3 )
A_ : List[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
A_ : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = 10
# no processor list
A_ : int = temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : List[str] = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Any = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Dict = min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# with processor list
A_ : Any = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
A_ : List[str] = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : str = 4
A_ : Dict = 10
A_ : Tuple = 15
A_ : List[str] = 2
A_ : List[str] = 1
A_ : Union[str, Any] = 15
# dummy input_ids and scores
A_ : Any = ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = input_ids.copy()
A_ : Optional[Any] = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Tuple = scores.copy()
# instantiate all dist processors
A_ : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
A_ : Optional[Any] = FlaxTopKLogitsWarper(3 )
A_ : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
A_ : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
A_ : str = 10
# no processor list
def run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : int = temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Dict = min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Any = bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
# with processor list
def run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
A_ : Optional[int] = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
A_ : Optional[int] = jax.jit(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = jax.jit(_SCREAMING_SNAKE_CASE )
A_ : Dict = jitted_run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[Any] = jitted_run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
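# Hedged usage sketch (added for illustration, outside the test harness): the
# composition pattern the tests above verify, chaining warpers into a single
# callable. `input_ids`, `scores`, and `cur_len` are assumed to come from a
# generation loop elsewhere.
#
#   processors = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(50), FlaxTopPLogitsWarper(0.9)]
#   )
#   scores = processors(input_ids, scores, cur_len=cur_len)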
| 186 | 1 |
def all_unique_characters(input_str: str) -> bool:
    """Return True if every character in `input_str` occurs only once,
    using a bitmap indexed by the characters' Unicode code points."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
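    # Hedged usage examples (added for illustration): "abc" has all-unique
    # characters, while "aba" repeats 'a'.
    print(all_unique_characters("abc"))  # True
    print(all_unique_characters("aba"))  # False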
| 208 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration class for Transformer-XL models."""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 208 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
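# Hedged usage sketch (added for illustration; the processor instances are
# assumed to be constructed elsewhere, and `video_frames` / `waveform` are
# hypothetical inputs):
#
#   processor = TvltProcessor(image_processor, feature_extractor)
#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#   # `inputs` is a single dict merging the image and audio feature dicts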
| 109 |
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers with gravity ("bead") transfers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("""Sequence must be list of non-negative integers""")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
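    # Hedged extra check (added for illustration): an already-sorted sequence
    # is a fixed point of the gravity passes above.
    assert bead_sort([1, 2, 3]) == [1, 2, 3]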
| 239 | 0 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case : Dict = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Dict = os.path.join(a_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(a_ )
__snake_case : Any = self.feature_extraction_class.from_json_file(a_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Optional[int] = feat_extract_first.save_pretrained(a_ )[0]
check_json_file_has_correct_format(a_ )
__snake_case : Dict = self.feature_extraction_class.from_pretrained(a_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = self.feature_extraction_class()
self.assertIsNotNone(a_ )
| 24 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : List[Any] = batch_size
__snake_case : str = seq_length
__snake_case : Any = is_training
__snake_case : Any = use_input_mask
__snake_case : str = use_token_type_ids
__snake_case : Dict = use_labels
__snake_case : int = vocab_size
__snake_case : Union[str, Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Union[str, Any] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Dict = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : str = num_labels
__snake_case : Dict = num_choices
__snake_case : Optional[int] = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Dict = None
if self.use_input_mask:
__snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Tuple = None
__snake_case : List[str] = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = DistilBertModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : int = model(a_ , a_ )
__snake_case : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = DistilBertForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = DistilBertForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[Any] = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = DistilBertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Optional[int] = DistilBertForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.num_choices
__snake_case : Any = DistilBertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Optional[int] = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs
__snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = DistilBertModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=a_ , dim=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = DistilBertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__snake_case : List[str] = True
__snake_case : Tuple = model_class(config=a_ )
__snake_case : Any = self._prepare_for_class(a_ , a_ )
__snake_case : Dict = torch.jit.trace(
a_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , '''traced_model.pt''' ) )
__snake_case : int = torch.jit.load(os.path.join(a_ , '''traced_model.pt''' ) , map_location=a_ )
loaded(inputs_dict['''input_ids'''].to(a_ ) , inputs_dict['''attention_mask'''].to(a_ ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__snake_case : List[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__snake_case : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case : List[Any] = model(a_ , attention_mask=a_ )[0]
__snake_case : Tuple = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , a_ )
__snake_case : Optional[int] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
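# Hedged sketch (added for illustration): the TorchScript round-trip exercised
# by the GPU test above, reduced to its essentials (the file path and `device`
# are hypothetical):
#
#   model.config.torchscript = True
#   traced = torch.jit.trace(model, (input_ids.to("cpu"), attention_mask.to("cpu")))
#   torch.jit.save(traced, "traced_model.pt")
#   loaded = torch.jit.load("traced_model.pt", map_location=device)
#   loaded(input_ids.to(device), attention_mask.to(device))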
| 24 | 1 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path: str, gpta_config_file: str, pytorch_dump_folder_path: str) -> None:
    # Construct model from the default config or a user-supplied JSON config
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)
    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, """w""", encoding="""utf-8""") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
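# Hedged CLI sketch (added for illustration; the flag names come from the
# argparse definitions above, while the paths and the script filename are
# hypothetical):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json   # optional; empty uses defaults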
| 192 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
A_ : Tuple = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__: str = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__: List[int] = []
UpperCAmelCase__: List[int] = []
def __init__( self , A__ , A__="<s>" , A__="</s>" , A__="</s>" , A__="<s>" , A__="<unk>" , A__="<pad>" , A__="<mask>" , A__=None , A__=None , A__=None , A__ = None , A__=None , A__=False , **A__ , ):
# Mask token behave like a normal word, i.e. include the space before it
A__ : Any = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else mask_token
A__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
A__ : List[str] = legacy_behaviour
super().__init__(
bos_token=A__ , eos_token=A__ , unk_token=A__ , sep_token=A__ , cls_token=A__ , pad_token=A__ , mask_token=A__ , tokenizer_file=A__ , src_lang=A__ , tgt_lang=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=A__ , **A__ , )
A__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A__ ) )
A__ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
A__ : str = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A__ : str = 1
A__ : Optional[int] = len(self.sp_model )
A__ : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A__ )
}
A__ : Tuple = {v: k for k, v in self.lang_code_to_id.items()}
A__ : Dict = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
A__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
A__ : int = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
A__ : int = src_lang if src_lang is not None else """eng_Latn"""
A__ : str = self.lang_code_to_id[self._src_lang]
A__ : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
A__ : Tuple = self.__dict__.copy()
A__ : List[Any] = None
A__ : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A__ ):
A__ : str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A__ : Any = {}
A__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __A ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __A ( self ):
return self._src_lang
@src_lang.setter
def __A ( self , A__ ):
A__ : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __A ( self , A__ , A__ = None , A__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
A__ : Dict = [1] * len(self.prefix_tokens )
A__ : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A__ )) + suffix_ones
return prefix_ones + ([0] * len(A__ )) + ([0] * len(A__ )) + suffix_ones
def __A ( self , A__ , A__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __A ( self , A__ , A__ = None ):
A__ : Dict = [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self , A__ , A__ , A__ , A__ , **A__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
A__ : Optional[int] = src_lang
A__ : List[Any] = self(A__ , add_special_tokens=A__ , return_tensors=A__ , **A__ )
A__ : Optional[int] = self.convert_tokens_to_ids(A__ )
A__ : Optional[int] = tgt_lang_id
return inputs
def __A ( self ):
A__ : List[str] = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self , A__ ):
return self.sp_model.encode(A__ , out_type=A__ )
def __A ( self , A__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A__ : List[str] = self.sp_model.PieceToId(A__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __A ( self , A__ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __A ( self , A__ ):
A__ : Optional[Any] = """""".join(A__ ).replace(A__ , """ """ ).strip()
return out_string
def __A ( self , A__ , A__ = None ):
if not os.path.isdir(A__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A__ : Any = os.path.join(
A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A__ )
elif not os.path.isfile(self.vocab_file ):
with open(A__ , """wb""" ) as fi:
A__ : str = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
def __A ( self , A__ , A__ = "eng_Latn" , A__ = None , A__ = "fra_Latn" , **A__ , ):
A__ : Any = src_lang
A__ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(A__ , A__ , **A__ )
def __A ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __A ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __A ( self , A__ ):
A__ : List[str] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
A__ : Dict = []
A__ : str = [self.eos_token_id, self.cur_lang_code]
else:
A__ : List[str] = [self.cur_lang_code]
A__ : Optional[Any] = [self.eos_token_id]
def __A ( self , A__ ):
A__ : Union[str, Any] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
A__ : Union[str, Any] = []
A__ : int = [self.eos_token_id, self.cur_lang_code]
else:
A__ : Dict = [self.cur_lang_code]
A__ : str = [self.eos_token_id]
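# Hedged usage sketch (added for illustration): the tokenizer class defined
# above (NllbTokenizer in the upstream library), loaded from the checkpoint
# named in the vocab map at the top of this file:
#
#   tok = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")
#   # language-code placement follows set_src_lang_special_tokens above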
| 192 | 1 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCamelCase__ = logging.get_logger(__name__)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = question_encoder
__lowerCAmelCase : Any = generator
__lowerCAmelCase : Optional[Any] = self.question_encoder
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if os.path.isfile(__lowerCamelCase ):
raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__lowerCAmelCase : Dict = os.path.join(__lowerCamelCase , 'question_encoder_tokenizer' )
__lowerCAmelCase : Dict = os.path.join(__lowerCamelCase , 'generator_tokenizer' )
self.question_encoder.save_pretrained(__lowerCamelCase )
self.generator.save_pretrained(__lowerCamelCase )
@classmethod
def __lowerCamelCase ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
from ..auto.tokenization_auto import AutoTokenizer
__lowerCAmelCase : Optional[Any] = kwargs.pop('config' , __lowerCamelCase )
if config is None:
__lowerCAmelCase : int = RagConfig.from_pretrained(__lowerCamelCase )
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
__lowerCamelCase , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(
__lowerCamelCase , config=config.generator , subfolder='generator_tokenizer' )
return cls(question_encoder=__lowerCamelCase , generator=__lowerCamelCase )
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.current_tokenizer(*__lowerCamelCase , **__lowerCamelCase )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.generator.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.generator.decode(*__lowerCamelCase , **__lowerCamelCase )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.question_encoder
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.generator
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "longest" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , **_SCREAMING_SNAKE_CASE , ):
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , __lowerCamelCase , )
if max_length is None:
__lowerCAmelCase : Union[str, Any] = self.current_tokenizer.model_max_length
__lowerCAmelCase : List[str] = self(
__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , max_length=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , **__lowerCamelCase , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__lowerCAmelCase : str = self.current_tokenizer.model_max_length
__lowerCAmelCase : Optional[int] = self(
text_target=__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase , **__lowerCamelCase , )
__lowerCAmelCase : Optional[Any] = labels['''input_ids''']
return model_inputs
| 356 |
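# Minimal sketch (not the transformers API) of the two-tokenizer pattern used
# by the RAG tokenizer above: one wrapper owns a question-encoder tokenizer and
# a generator tokenizer and routes every call through whichever is "current".
class DualTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = question_encoder  # inputs are encoded by default

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def switch_to_target_mode(self):
        self.current_tokenizer = self.generator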
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[Any] = KandinskyVaaInpaintPipeline
A_ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
A_ : Any = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
A_ : Any = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
A_ : Any = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 1_00
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[int] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__lowerCAmelCase : Any = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.dummy_unet
__lowerCAmelCase : Optional[Any] = self.dummy_movq
__lowerCAmelCase : Tuple = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type='epsilon' , thresholding=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_SCREAMING_SNAKE_CASE )
# create init_image
__lowerCAmelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase : str = Image.fromarray(np.uinta(_SCREAMING_SNAKE_CASE ) ).convert('RGB' ).resize((2_56, 2_56) )
# create mask
__lowerCAmelCase : Dict = np.ones((64, 64) , dtype=np.floataa )
__lowerCAmelCase : List[str] = 0
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : Optional[int] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Optional[Any] = output.images
__lowerCAmelCase : Any = pipe(
**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) , return_dict=_SCREAMING_SNAKE_CASE , )[0]
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
__lowerCAmelCase : Any = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase : str = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def __lowerCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
__lowerCAmelCase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__lowerCAmelCase : Any = np.ones((7_68, 7_68) , dtype=np.floataa )
__lowerCAmelCase : int = 0
__lowerCAmelCase : str = 'a hat'
__lowerCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
__lowerCAmelCase : Tuple = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase : Any = pipe_prior(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__lowerCAmelCase : Tuple = pipeline(
image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , image_embeds=_SCREAMING_SNAKE_CASE , negative_image_embeds=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
__lowerCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 182 | 0 |
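# Hedged sketch of the tensor -> PIL conversion in get_dummy_inputs above.
# Note the dummy tensor already lies in [0, 1]; the * 255 scaling here is an
# assumption for real image data, not what the test literally does.
import numpy as np
from PIL import Image

def chw_float_to_pil(image_chw: np.ndarray) -> Image.Image:
    image_hwc = image_chw.transpose(1, 2, 0)  # CHW -> HWC, as in permute(0, 2, 3, 1)
    return Image.fromarray(np.uint8(image_hwc * 255)).convert("RGB")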
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('''Gain (dB)''')
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('''Phase shift (Radians)''')
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
| 75 |
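# Hypothetical usage of the plotting helpers above: any object with a
# process(sample) method satisfies the FilterType protocol, so a pass-through
# filter should plot a flat 0 dB magnitude line and zero phase shift.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample

# show_frequency_response(IdentityFilter(), 48000)
# show_phase_response(IdentityFilter(), 48000)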
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
a_ : Any = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, **lowerCAmelCase ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return super().__call__(lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={}
if "candidate_labels" in kwargs:
lowerCamelCase_ =kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowerCamelCase_ =kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase="This is a sound of {}." ):
"""simple docstring"""
if isinstance(lowerCAmelCase, lowerCAmelCase ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
lowerCamelCase_ =requests.get(lowerCAmelCase ).content
else:
with open(lowerCAmelCase, '''rb''' ) as f:
lowerCamelCase_ =f.read()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =ffmpeg_read(lowerCAmelCase, self.feature_extractor.sampling_rate )
if not isinstance(lowerCAmelCase, np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
lowerCamelCase_ =self.feature_extractor(
[audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='''pt''' )
lowerCamelCase_ =candidate_labels
lowerCamelCase_ =[hypothesis_template.format(lowerCAmelCase ) for x in candidate_labels]
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework, padding=lowerCAmelCase )
lowerCamelCase_ =[text_inputs]
return inputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model_inputs.pop('''candidate_labels''' )
lowerCamelCase_ =model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0], lowerCAmelCase ):
lowerCamelCase_ =text_inputs[0]
else:
# Batching case.
lowerCamelCase_ =text_inputs[0][0]
lowerCamelCase_ =self.model(**lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ ={
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model_outputs.pop('''candidate_labels''' )
lowerCamelCase_ =model_outputs['''logits'''][0]
if self.framework == "pt":
lowerCamelCase_ =logits.softmax(dim=0 )
lowerCamelCase_ =probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
lowerCamelCase_ =[
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCAmelCase, lowerCAmelCase ), key=lambda lowerCAmelCase : -x[0] )
]
return result
| 75 | 1 |
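# Minimal framework-agnostic sketch of the postprocess step above: turn
# per-label logits into a score-sorted list of {"score", "label"} dicts.
import math

def rank_labels(logits, labels):
    m = max(logits)
    exp = [math.exp(l - m) for l in logits]  # shifted softmax for numerical stability
    total = sum(exp)
    probs = [e / total for e in exp]
    return [
        {"score": score, "label": label}
        for score, label in sorted(zip(probs, labels), key=lambda pair: -pair[0])
    ]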
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = '''base_with_context'''
def lowerCAmelCase (__UpperCamelCase : Tuple , __UpperCamelCase : List[str] ):
"""simple docstring"""
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__UpperCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCamelCase =weights[F"""layers_{lyr_num}"""]
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
__UpperCamelCase =ly_weight['''attention''']
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def lowerCAmelCase (__UpperCamelCase : str , __UpperCamelCase : Dict ):
"""simple docstring"""
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__UpperCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCamelCase =weights[F"""layers_{lyr_num}"""]
__UpperCamelCase =ly_weight['''attention''']
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__UpperCamelCase )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__UpperCamelCase =weights[F"""layers_{lyr_num}"""]
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
__UpperCamelCase =ly_weight['''self_attention''']
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__UpperCamelCase =ly_weight['''MultiHeadDotProductAttention_0''']
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main(args):
"""simple docstring"""
__UpperCamelCase =checkpoints.load_tax_checkpoint(args.checkpoint_path )
__UpperCamelCase =jnp.tree_util.tree_map(onp.array , __UpperCamelCase )
__UpperCamelCase =[
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
__UpperCamelCase =os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
__UpperCamelCase =inference.parse_training_gin_file(__UpperCamelCase , __UpperCamelCase )
__UpperCamelCase =inference.InferenceModel(args.checkpoint_path , __UpperCamelCase )
__UpperCamelCase =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
__UpperCamelCase =SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
__UpperCamelCase =SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
__UpperCamelCase =TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
__UpperCamelCase =load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , __UpperCamelCase )
__UpperCamelCase =load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , __UpperCamelCase )
__UpperCamelCase =load_decoder(ta_checkpoint['''target''']['''decoder'''] , __UpperCamelCase )
__UpperCamelCase =OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
__UpperCamelCase =SpectrogramDiffusionPipeline(
notes_encoder=__UpperCamelCase , continuous_encoder=__UpperCamelCase , decoder=__UpperCamelCase , scheduler=__UpperCamelCase , melgan=__UpperCamelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
args = parser.parse_args()
main(args)
| 370 | """simple docstring"""
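# Hedged sketch of the conversion idiom repeated throughout the script above:
# Flax/T5X stores dense kernels as (in, out) while torch.nn.Linear weights are
# (out, in), hence the transpose before wrapping in nn.Parameter. The helper
# name is illustrative, not part of the script.
import torch
import torch.nn as nn

def port_dense_kernel(kernel) -> nn.Parameter:
    return nn.Parameter(torch.FloatTensor(kernel.T))  # (in, out) -> (out, in)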
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '''\
Text data.
Second line of data.'''
FILE_PATH = '''file'''
@pytest.fixture(scope='''session''' )
def zstd_path(tmp_path_factory):
    """simple docstring"""
    path = tmp_path_factory.mktemp('''data''') / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT, '''utf-8''')
    with zstd.open(path, '''wb''') as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    """simple docstring"""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), '''w''') as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def lowerCAmelCase (__UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase ={'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__UpperCamelCase =input_paths[compression_format]
__UpperCamelCase =tmp_path / '''cache'''
__UpperCamelCase =DownloadConfig(cache_dir=__UpperCamelCase , extract_compressed_file=__UpperCamelCase )
__UpperCamelCase =cached_path(__UpperCamelCase , download_config=__UpperCamelCase )
with open(__UpperCamelCase ) as f:
__UpperCamelCase =f.read()
with open(__UpperCamelCase ) as f:
__UpperCamelCase =f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def lowerCAmelCase (__UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
"""simple docstring"""
__UpperCamelCase ='''custom_cache'''
__UpperCamelCase ='''custom_extracted_dir'''
__UpperCamelCase =tmp_path / '''custom_extracted_path'''
if default_extracted:
__UpperCamelCase =('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , __UpperCamelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(__UpperCamelCase ) )
__UpperCamelCase =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__UpperCamelCase =xz_file
__UpperCamelCase =(
DownloadConfig(extract_compressed_file=__UpperCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__UpperCamelCase )
)
__UpperCamelCase =cached_path(__UpperCamelCase , download_config=__UpperCamelCase )
assert Path(__UpperCamelCase ).parent.parts[-2:] == expected
def lowerCAmelCase (__UpperCamelCase : Dict ):
"""simple docstring"""
__UpperCamelCase =str(Path(__UpperCamelCase ).resolve() )
assert cached_path(__UpperCamelCase ) == text_file
# relative path
__UpperCamelCase =str(Path(__UpperCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__UpperCamelCase ) == text_file
def lowerCAmelCase (__UpperCamelCase : int ):
"""simple docstring"""
__UpperCamelCase =str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(__UpperCamelCase ):
cached_path(__UpperCamelCase )
# relative path
__UpperCamelCase ='''./__missing_file__.txt'''
with pytest.raises(__UpperCamelCase ):
cached_path(__UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Any ):
"""simple docstring"""
__UpperCamelCase =get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(__UpperCamelCase ) as f:
__UpperCamelCase =f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def lowerCAmelCase ():
"""simple docstring"""
with pytest.raises(__UpperCamelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def lowerCAmelCase (__UpperCamelCase : Dict ):
"""simple docstring"""
__UpperCamelCase =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__UpperCamelCase ):
http_get('''https://huggingface.co''' , temp_file=__UpperCamelCase )
with pytest.raises(__UpperCamelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def lowerCAmelCase (__UpperCamelCase : List[str] ):
"""simple docstring"""
__UpperCamelCase =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__UpperCamelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=__UpperCamelCase )
with pytest.raises(__UpperCamelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def lowerCAmelCase (__UpperCamelCase : Dict ):
"""simple docstring"""
__UpperCamelCase =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__UpperCamelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=__UpperCamelCase )
with pytest.raises(__UpperCamelCase ):
fsspec_head('''s3://huggingface.co''' )
| 85 | 0 |
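# Hedged sketch of the round trip the zstd fixture above sets up: compress the
# file contents with zstandard, then read them back through the same API.
import zstandard as zstd

def zstd_round_trip(path, text):
    with zstd.open(path, "wb") as f:
        f.write(bytes(text, "utf-8"))
    with zstd.open(path, "rb") as f:
        return f.read()  # decompressed bytes, equal to text.encode("utf-8")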
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = AutoencoderKL
_UpperCamelCase : List[Any] = 'sample'
_UpperCamelCase : List[Any] = 1E-2
@property
def __a ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase : Tuple = 4
lowercase : str = 3
lowercase : Optional[Any] = (32, 32)
lowercase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
return {"sample": image}
@property
def __a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (3, 32, 32)
@property
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return (3, 32, 32)
def __a ( self : int ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
lowercase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __a ( self : Tuple ) -> int:
"""simple docstring"""
pass
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase , lowercase : List[Any] = self.prepare_init_args_and_inputs_for_common()
lowercase : Union[str, Any] = self.model_class(**a__ )
model.to(a__ )
assert not model.is_gradient_checkpointing and model.training
lowercase : List[Any] = model(**a__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
lowercase : Tuple = torch.randn_like(a__ )
lowercase : List[Any] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowercase : str = self.model_class(**a__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(a__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowercase : Tuple = model_a(**a__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
lowercase : Any = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
lowercase : Union[str, Any] = dict(model.named_parameters() )
lowercase : int = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase , lowercase : str = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(a__ )
lowercase : List[Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase : Any = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
lowercase : List[Any] = model.to(a__ )
model.eval()
if torch_device == "mps":
lowercase : Union[str, Any] = torch.manual_seed(0 )
else:
lowercase : List[str] = torch.Generator(device=a__ ).manual_seed(0 )
lowercase : List[str] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase : Union[str, Any] = image.to(a__ )
with torch.no_grad():
lowercase : Union[str, Any] = model(a__ , sample_posterior=a__ , generator=a__ ).sample
lowercase : Dict = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowercase : str = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
lowercase : int = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
lowercase : int = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(a__ , a__ , rtol=1E-2 ) )
@slow
class _A ( unittest.TestCase ):
def __a ( self : Tuple , _A : Optional[Any] , _A : List[str] ) -> List[str]:
"""simple docstring"""
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(a__ ) for s in shape] )}.npy"""
def __a ( self : List[Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Optional[int] , _A : Optional[Any]=0 , _A : List[Any]=(4, 3, 512, 512) , _A : Optional[Any]=False ) -> List[str]:
"""simple docstring"""
lowercase : Tuple = torch.floataa if fpaa else torch.floataa
lowercase : int = torch.from_numpy(load_hf_numpy(self.get_file_format(a__ , a__ ) ) ).to(a__ ).to(a__ )
return image
def __a ( self : Optional[Any] , _A : Dict="CompVis/stable-diffusion-v1-4" , _A : List[Any]=False ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = '''fp16''' if fpaa else None
lowercase : Tuple = torch.floataa if fpaa else torch.floataa
lowercase : Union[str, Any] = AutoencoderKL.from_pretrained(
a__ , subfolder='''vae''' , torch_dtype=a__ , revision=a__ , )
model.to(a__ ).eval()
return model
def __a ( self : Union[str, Any] , _A : int=0 ) -> Dict:
"""simple docstring"""
if torch_device == "mps":
return torch.manual_seed(a__ )
return torch.Generator(device=a__ ).manual_seed(a__ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def __a ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase : List[str] = self.get_sd_vae_model()
lowercase : List[str] = self.get_sd_image(a__ )
lowercase : str = self.get_generator(a__ )
with torch.no_grad():
lowercase : str = model(a__ , generator=a__ , sample_posterior=a__ ).sample
assert sample.shape == image.shape
lowercase : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase : List[Any] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(a__ , a__ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def __a ( self : Union[str, Any] , _A : Union[str, Any] , _A : str ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.get_sd_vae_model(fpaa=a__ )
lowercase : Any = self.get_sd_image(a__ , fpaa=a__ )
lowercase : List[Any] = self.get_generator(a__ )
with torch.no_grad():
lowercase : Optional[int] = model(a__ , generator=a__ , sample_posterior=a__ ).sample
assert sample.shape == image.shape
lowercase : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase : Optional[Any] = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def __a ( self : Optional[Any] , _A : str , _A : Tuple , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Tuple = self.get_sd_vae_model()
lowercase : List[Any] = self.get_sd_image(a__ )
with torch.no_grad():
lowercase : Dict = model(a__ ).sample
assert sample.shape == image.shape
lowercase : Dict = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase : str = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(a__ , a__ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
def __a ( self : str , _A : Optional[int] , _A : Any ) -> int:
"""simple docstring"""
lowercase : Dict = self.get_sd_vae_model()
lowercase : Union[str, Any] = self.get_sd_image(a__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase : List[str] = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase : List[str] = sample[-1, -2:, :2, -2:].flatten().cpu()
lowercase : int = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
def __a ( self : List[Any] , _A : Any , _A : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = self.get_sd_vae_model(fpaa=a__ )
lowercase : Tuple = self.get_sd_image(a__ , shape=(3, 4, 64, 64) , fpaa=a__ )
with torch.no_grad():
lowercase : Union[str, Any] = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase : Any = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase : Dict = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __a ( self : Tuple , _A : Any ) -> int:
"""simple docstring"""
lowercase : Tuple = self.get_sd_vae_model(fpaa=a__ )
lowercase : Optional[int] = self.get_sd_image(a__ , shape=(3, 4, 64, 64) , fpaa=a__ )
with torch.no_grad():
lowercase : List[Any] = model.decode(a__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase : str = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(a__ , a__ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __a ( self : int , _A : Optional[int] ) -> str:
"""simple docstring"""
lowercase : List[str] = self.get_sd_vae_model()
lowercase : str = self.get_sd_image(a__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase : List[Any] = model.decode(a__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase : List[Any] = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(a__ , a__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
def __a ( self : str , _A : Optional[Any] , _A : List[str] ) -> List[str]:
"""simple docstring"""
lowercase : Optional[Any] = self.get_sd_vae_model()
lowercase : str = self.get_sd_image(a__ )
lowercase : Dict = self.get_generator(a__ )
with torch.no_grad():
lowercase : int = model.encode(a__ ).latent_dist
lowercase : Dict = dist.sample(generator=a__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowercase : List[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
lowercase : Optional[Any] = torch.tensor(a__ )
lowercase : List[str] = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(a__ , a__ , atol=a__ ) | 308 |
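# Minimal sketch of the regression-test pattern used throughout the class
# above: compare a small corner slice of the model output against hard-coded
# reference values within a tolerance. torch.allclose stands in for diffusers'
# torch_all_close helper here.
import torch

def check_slice(sample: torch.Tensor, expected: torch.Tensor, atol: float = 3e-3) -> None:
    actual = sample[-1, -2:, -2:, :2].flatten().float().cpu()
    assert torch.allclose(actual, expected, atol=atol), (
        f"max abs diff {(actual - expected).abs().max().item()}"
    )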
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : List[Any] = BartphoTokenizer
A_ : List[str] = False
A_ : Optional[Any] = True
def a (self : Tuple ):
"""simple docstring"""
super().setUp()
__snake_case = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
__snake_case = dict(zip(a__ , range(len(a__ ) ) ) )
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
__snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def a (self : str , **a__ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a (self : str , a__ : Any ):
"""simple docstring"""
__snake_case = '''This is a là test'''
__snake_case = '''This is a<unk><unk> test'''
return input_text, output_text
def a (self : Dict ):
"""simple docstring"""
__snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map )
__snake_case = '''This is a là test'''
__snake_case = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
__snake_case = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
| 24 | 0 |
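# Sketch of the id mapping the test above asserts: in-vocab pieces map to
# their ids while anything out-of-vocabulary collapses to the <unk> id. The
# dict below is illustrative, mirroring the effective (offset) mapping rather
# than the raw fixture vocab.
vocab = {"▁This": 4, "▁is": 5, "▁a": 6, "▁t": 7, "est": 8, "<unk>": 3}
tokens = ["▁This", "▁is", "▁a", "▁l", "à", "▁t", "est", "<unk>"]
ids = [vocab.get(tok, vocab["<unk>"]) for tok in tokens]
assert ids == [4, 5, 6, 3, 3, 7, 8, 3]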
"""simple docstring"""
import random
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Optional[Any]:
__lowerCAmelCase : Union[str, Any] = a[left_index]
__lowerCAmelCase : Union[str, Any] = left_index + 1
for j in range(left_index + 1 ,__snake_case ):
if a[j] < pivot:
__lowerCAmelCase , __lowerCAmelCase : Any = a[i], a[j]
i += 1
__lowerCAmelCase , __lowerCAmelCase : Any = a[i - 1], a[left_index]
return i - 1
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Any:
if left < right:
__lowerCAmelCase : List[Any] = random.randint(__snake_case ,right - 1 )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
__lowerCAmelCase : Tuple = partition(__snake_case ,__snake_case ,__snake_case )
quick_sort_random(
__snake_case ,__snake_case ,__snake_case ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__snake_case ,pivot_index + 1 ,__snake_case ) # recursive quicksort to the right of the pivot point
def _lowercase ( ) -> Any:
__lowerCAmelCase : Any = input("Enter numbers separated by a comma:\n" ).strip()
__lowerCAmelCase : List[Any] = [int(__snake_case ) for item in user_input.split("," )]
quick_sort_random(__snake_case ,0 ,len(__snake_case ) )
print(__snake_case )
if __name__ == "__main__":
main() | 58 |
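# Deterministic demo of the random-pivot quicksort above: seeding the module
# RNG makes the pivot choices, and hence the whole run, reproducible.
random.seed(0)
data = [5, 3, 8, 1, 9, 2]
quick_sort_random(data, 0, len(data))
assert data == [1, 2, 3, 5, 8, 9]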
"""simple docstring"""
from math import pi
def _lowercase ( __snake_case ,__snake_case ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10)) | 58 | 1 |
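# Worked check of the formula above: a 90-degree arc of a circle with radius
# 10 is a quarter circumference, 2 * pi * 10 / 4 = 5 * pi ~= 15.7080.
assert abs(arc_length(90, 10) - 15.707963267948966) < 1e-12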
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = KandinskyVaaPriorPipeline
UpperCamelCase_ : Any = ['''prompt''']
UpperCamelCase_ : List[Any] = ['''prompt''', '''negative_prompt''']
UpperCamelCase_ : List[Any] = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase_ : int = False
@property
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return 3_2
@property
def _lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return 3_2
@property
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
return self.time_input_dim
@property
def _lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return 1_0_0
@property
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(lowerCAmelCase__ )
@property
def _lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : int = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
_UpperCAmelCase : Optional[Any] = PriorTransformer(**lowerCAmelCase__ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_UpperCAmelCase : str = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
_UpperCAmelCase : List[str] = CLIPVisionModelWithProjection(lowerCAmelCase__ )
return model
@property
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_resize=lowerCAmelCase__ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.dummy_prior
_UpperCAmelCase : List[Any] = self.dummy_image_encoder
_UpperCAmelCase : Any = self.dummy_text_encoder
_UpperCAmelCase : int = self.dummy_tokenizer
_UpperCAmelCase : Tuple = self.dummy_image_processor
_UpperCAmelCase : List[Any] = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=lowerCAmelCase__ , clip_sample_range=10.0 , )
_UpperCAmelCase : Optional[int] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def _lowerCAmelCase ( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int]=0 ) -> Dict:
"""simple docstring"""
if str(lowerCAmelCase__ ).startswith("mps" ):
_UpperCAmelCase : Any = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCAmelCase : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_UpperCAmelCase : List[Any] = "cpu"
_UpperCAmelCase : Dict = self.get_dummy_components()
_UpperCAmelCase : Dict = self.pipeline_class(**lowerCAmelCase__ )
_UpperCAmelCase : Any = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
_UpperCAmelCase : Dict = output.image_embeds
_UpperCAmelCase : List[Any] = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
_UpperCAmelCase : Union[str, Any] = image[0, -1_0:]
_UpperCAmelCase : str = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
_UpperCAmelCase : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Dict = torch_device == "cpu"
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Any = False
self._test_inference_batch_single_identical(
test_max_difference=lowerCAmelCase__ , relax_max_difference=lowerCAmelCase__ , test_mean_pixel_difference=lowerCAmelCase__ , )
@skip_mps
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
_UpperCAmelCase : List[Any] = torch_device == "cpu"
_UpperCAmelCase : Optional[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=lowerCAmelCase__ , test_mean_pixel_difference=lowerCAmelCase__ , ) | 145 | '''simple docstring'''
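# Hedged sketch of the device-aware seeding branch used in get_dummy_inputs in
# the pipeline tests above: some torch builds cannot host a torch.Generator on
# "mps", so the global RNG is seeded instead. The helper name is illustrative.
import torch

def make_generator(device, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default CPU generator
    return torch.Generator(device=device).manual_seed(seed)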
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError
import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.") | 145 | 1 |
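# Hypothetical offline smoke test for summarize_book above: the payload is
# hand-written in the shape Open Library returns (the values are illustrative,
# and the empty authors list avoids the follow-up network lookups).
sample_payload = {
    "title": "Matilda",
    "publish_date": "1988",
    "authors": [],
    "number_of_pages": 240,
    "first_sentence": {"value": "It's a funny thing about mothers and fathers."},
    "isbn_10": ["0140328726"],
    "isbn_13": ["9780140328721"],
}
print(summarize_book(sample_payload))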
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 41 |
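# An O(n^2) brute-force reference, useful for sanity-checking the divide-and-
# conquer implementation above on small inputs (assumes euclidean_distance_sqr
# from that snippet is in scope).
from itertools import combinations

def closest_pair_brute_force(points):
    return min(euclidean_distance_sqr(p, q) for p, q in combinations(points, 2)) ** 0.5

# e.g. closest_pair_brute_force([(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)])
# agrees with closest_pair_of_points(points, len(points))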
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
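# A minimal usage sketch (illustrative addition, not part of the original file; the
# checkpoint name is one published by the BridgeTower authors on the Hugging Face Hub):
#
#   from transformers import BridgeTowerProcessor
#   from PIL import Image
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=Image.open("photo.jpg"), text="a photo", return_tensors="pt")
#   # `inputs` holds input_ids/attention_mask from the tokenizer plus pixel_values/pixel_mask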
| 41 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    r"""Constructs a ConvNeXT image processor."""

    model_input_names = ["pixel_values"]
def __init__(self , lowercase = True , lowercase = None , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(**lowercase )
A_ : Dict = size if size is not None else {"""shortest_edge""": 384}
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Optional[int] = do_resize
A_ : Tuple = size
# Default value set here for backwards compatibility where the value in config is None
A_ : int = crop_pct if crop_pct is not None else 224 / 256
A_ : int = resample
A_ : Tuple = do_rescale
A_ : Tuple = rescale_factor
A_ : Dict = do_normalize
A_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A_ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a (self , lowercase , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
A_ : List[Any] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A_ : Dict = int(shortest_edge / crop_pct )
A_ : Any = get_resize_output_image_size(lowercase , size=lowercase , default_to_square=lowercase )
A_ : Union[str, Any] = resize(image=lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowercase , size=(shortest_edge, shortest_edge) , data_format=lowercase , **lowercase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowercase , size=(shortest_edge, shortest_edge) , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : Dict = do_resize if do_resize is not None else self.do_resize
A_ : Dict = crop_pct if crop_pct is not None else self.crop_pct
A_ : List[str] = resample if resample is not None else self.resample
A_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
A_ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : str = size if size is not None else self.size
A_ : List[Any] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Optional[int] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A_ : List[Any] = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : Tuple = [self.resize(image=lowercase , size=lowercase , crop_pct=lowercase , resample=lowercase ) for image in images]
if do_rescale:
A_ : Optional[Any] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : Union[str, Any] = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 206 |
"""Count the representations of a number as a sum of unique natural-number powers."""
from math import pow


def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    """Recursively try adding current_number**power and count exact matches."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum, power):
    """Return how many ways `needed_sum` can be written as a sum of unique powers."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
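# A small worked example (illustrative addition, not part of the original file):
# 100 has exactly three representations as a sum of unique squares,
# 10^2, 6^2 + 8^2, and 1^2 + 3^2 + 4^2 + 5^2 + 7^2, so solve(100, 2) == 3.
assert solve(100, 2) == 3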
if __name__ == "__main__":
import doctest
doctest.testmod() | 206 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
def __init__( self , _lowercase , _lowercase=2 , _lowercase=3 , _lowercase=4 , _lowercase=2 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=36 , _lowercase=2 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=6 , _lowercase=6 , _lowercase=3 , _lowercase=4 , _lowercase=None , _lowercase=1_000 , )-> Any:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = coordinate_size
UpperCamelCase_ = shape_size
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
UpperCamelCase_ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase_ = text_seq_length
UpperCamelCase_ = (image_size // patch_size) ** 2 + 1
UpperCamelCase_ = self.text_seq_length + self.image_seq_length
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase_ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase_ = bbox[i, j, 3]
UpperCamelCase_ = bbox[i, j, 1]
UpperCamelCase_ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase_ = bbox[i, j, 2]
UpperCamelCase_ = bbox[i, j, 0]
UpperCamelCase_ = tmp_coordinate
UpperCamelCase_ = tf.constant(_lowercase )
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase_ = None
if self.use_token_type_ids:
UpperCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase_ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )-> int:
UpperCamelCase_ = TFLayoutLMvaModel(config=_lowercase )
# text + image
UpperCamelCase_ = model(_lowercase , pixel_values=_lowercase , training=_lowercase )
UpperCamelCase_ = model(
_lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , training=_lowercase , )
UpperCamelCase_ = model(_lowercase , bbox=_lowercase , pixel_values=_lowercase , training=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase_ = model(_lowercase , training=_lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase_ = model({"pixel_values": pixel_values} , training=_lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )-> List[Any]:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = TFLayoutLMvaForSequenceClassification(config=_lowercase )
UpperCamelCase_ = model(
_lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , training=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )-> Tuple:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = TFLayoutLMvaForTokenClassification(config=_lowercase )
UpperCamelCase_ = model(
_lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , training=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )-> Optional[Any]:
UpperCamelCase_ = 2
UpperCamelCase_ = TFLayoutLMvaForQuestionAnswering(config=_lowercase )
UpperCamelCase_ = model(
_lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , training=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase_ :str = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase_ :Optional[int] = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
UpperCamelCase_ :int = False
UpperCamelCase_ :Any = False
UpperCamelCase_ :Union[str, Any] = False
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )-> Any:
return True
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase=False )-> dict:
UpperCamelCase_ = copy.deepcopy(_lowercase )
if model_class in get_values(_lowercase ):
UpperCamelCase_ = {
k: tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_lowercase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_lowercase ):
UpperCamelCase_ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowercase ):
UpperCamelCase_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCamelCase_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowercase ):
UpperCamelCase_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowercase ):
UpperCamelCase_ = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_lowercase )
if getattr(_lowercase , "hf_compute_loss" , _lowercase ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase_ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase )
UpperCamelCase_ = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_lowercase )[0]
]
UpperCamelCase_ = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase_ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase )
UpperCamelCase_ = prepared_for_class.pop("input_ids" )
UpperCamelCase_ = model(_lowercase , **_lowercase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase_ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase )
UpperCamelCase_ = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
UpperCamelCase_ = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase_ = -100
UpperCamelCase_ = tf.convert_to_tensor(_lowercase )
UpperCamelCase_ = model(_lowercase , **_lowercase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase_ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase )
UpperCamelCase_ = model(_lowercase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase_ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase_ = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase_ = inspect.signature(model.call ).parameters
UpperCamelCase_ = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase_ = {0: "input_ids"}
for label_key in label_keys:
UpperCamelCase_ = signature_names.index(_lowercase )
UpperCamelCase_ = label_key
UpperCamelCase_ = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase_ = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase_ = prepared_for_class[value]
UpperCamelCase_ = tuple(_lowercase )
# Send to model
UpperCamelCase_ = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
def UpperCAmelCase_ ( self )-> Any:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = TFLayoutLMvaModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCAmelCase( )-> int:
"""simple docstring"""
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class __magic_name__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self )-> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=_lowercase ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_lowercase , return_tensors="tf" ).pixel_values
UpperCamelCase_ = tf.constant([[1, 2]] )
UpperCamelCase_ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase_ = model(input_ids=_lowercase , bbox=_lowercase , pixel_values=_lowercase , training=_lowercase )
# verify the logits
UpperCamelCase_ = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _lowercase )
UpperCamelCase_ = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowercase , atol=1e-4 ) )
| 60 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Rename original GLPN checkpoint keys to the HuggingFace naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
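# Illustrative walkthrough (added comment, not part of the original file): a raw checkpoint
# key such as "module.encoder.patch_embed1.proj.weight" passes through the replacements above
# and comes out as "glpn.encoder.patch_embeddings.0.proj.weight".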
def read_in_k_v(state_dict, config):
    """Split each fused key/value ("kv") matrix into separate key and value weights."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Load the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original GLPN checkpoint into the HuggingFace format."""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 60 | 1 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__SCREAMING_SNAKE_CASE =datasets.logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ="\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
__SCREAMING_SNAKE_CASE ="\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n"
__SCREAMING_SNAKE_CASE ="\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
__SCREAMING_SNAKE_CASE ={
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/google-research/bleurt' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/google-research/bleurt'] ,reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
if self.config_name == "default":
logger.warning(
'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' )
lowercase_ : Dict = '''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
lowercase_ : Optional[int] = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
lowercase_ : Union[str, Any] = self.config_name.upper()
else:
raise KeyError(
f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
# download the model checkpoint specified by self.config_name and set up the scorer
lowercase_ : Any = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
lowercase_ : Dict = score.BleurtScorer(os.path.join(__UpperCamelCase ,__UpperCamelCase ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = self.scorer.score(references=__UpperCamelCase ,candidates=__UpperCamelCase )
return {"scores": scores}
| 213 |
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs)
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
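# A minimal usage sketch (illustrative addition, not part of the original file; it assumes
# these filesystems are registered with fsspec under their `protocol` names, as the
# `datasets` library does on import). The chained URL form matches the class comment above:
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", "rb") as f:
#       decompressed_bytes = f.read()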
| 279 | 0 |
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 354 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    r"""Constructs a MobileViT image processor."""

    model_input_names = ["pixel_values"]
def __init__( self : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Dict , ) -> None:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 2_2_4}
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 2_5_6, "width": 2_5_6}
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , param_name="crop_size" )
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_flip_channel_order
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PIL.Image.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
__SCREAMING_SNAKE_CASE = get_resize_output_image_size(UpperCAmelCase__ , size=size["shortest_edge"] , default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(UpperCAmelCase__ , size=(size["height"], size["width"]) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Dict , ) -> Dict:
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
return flip_channel_order(UpperCAmelCase__ , data_format=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : str , ) -> PIL.Image.Image:
__SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
__SCREAMING_SNAKE_CASE = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
__SCREAMING_SNAKE_CASE = size if size is not None else self.size
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , param_name="crop_size" )
__SCREAMING_SNAKE_CASE = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
__SCREAMING_SNAKE_CASE = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
__SCREAMING_SNAKE_CASE = [self.flip_channel_order(image=UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Convert model logits into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
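    # Illustrative note (added comment, not part of the original file): this is meant to be
    # called on the output of a segmentation head, e.g. `MobileViTForSemanticSegmentation`:
    #   seg_maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])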
| 195 | 0 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
"""simple docstring"""
def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=True , A=True , A=True , A=False , A=False , A=False , A=2 , A=9_9 , A=0 , A=3_2 , A=5 , A=4 , A=0.1 , A=0.1 , A=5_1_2 , A=1_2 , A=2 , A=0.02 , A=3 , A=4 , A="last" , A=None , A=None , ) -> Union[str, Any]:
snake_case : Optional[int] = parent
snake_case : Dict = batch_size
snake_case : int = seq_length
snake_case : List[str] = is_training
snake_case : List[Any] = use_input_lengths
snake_case : List[str] = use_token_type_ids
snake_case : List[str] = use_labels
snake_case : Union[str, Any] = gelu_activation
snake_case : Any = sinusoidal_embeddings
snake_case : Union[str, Any] = causal
snake_case : Dict = asm
snake_case : Any = n_langs
snake_case : Tuple = vocab_size
snake_case : List[str] = n_special
snake_case : Optional[int] = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Tuple = num_attention_heads
snake_case : Optional[Any] = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : Dict = max_position_embeddings
snake_case : Any = type_vocab_size
snake_case : Tuple = type_sequence_label_size
snake_case : str = initializer_range
snake_case : Optional[Any] = num_labels
snake_case : Optional[int] = num_choices
snake_case : List[str] = summary_type
snake_case : Dict = use_proj
snake_case : int = scope
def UpperCAmelCase ( self ) -> Any:
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : List[str] = None
if self.use_input_lengths:
snake_case : Any = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case : Optional[Any] = None
if self.use_token_type_ids:
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case : Optional[int] = None
snake_case : List[Any] = None
snake_case : int = None
if self.use_labels:
snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : Union[str, Any] = ids_tensor([self.batch_size] , 2 ).float()
snake_case : List[str] = ids_tensor([self.batch_size] , self.num_choices )
snake_case : int = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self ) -> List[str]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[int]:
snake_case : Optional[int] = FlaubertModel(config=A )
model.to(A )
model.eval()
snake_case : List[Any] = model(A , lengths=A , langs=A )
snake_case : Any = model(A , langs=A )
snake_case : Dict = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> int:
snake_case : List[str] = FlaubertWithLMHeadModel(A )
model.to(A )
model.eval()
snake_case : Any = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[Any]:
snake_case : Tuple = FlaubertForQuestionAnsweringSimple(A )
model.to(A )
model.eval()
snake_case : Any = model(A )
snake_case : Optional[Any] = model(A , start_positions=A , end_positions=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Dict:
snake_case : List[Any] = FlaubertForQuestionAnswering(A )
model.to(A )
model.eval()
snake_case : Optional[Any] = model(A )
snake_case : Optional[int] = model(
A , start_positions=A , end_positions=A , cls_index=A , is_impossible=A , p_mask=A , )
snake_case : List[Any] = model(
A , start_positions=A , end_positions=A , cls_index=A , is_impossible=A , )
((snake_case) , ) : int = result_with_labels.to_tuple()
snake_case : List[Any] = model(A , start_positions=A , end_positions=A )
((snake_case) , ) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> int:
snake_case : List[str] = FlaubertForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : str = model(A )
snake_case : Optional[Any] = model(A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Dict:
snake_case : List[str] = self.num_labels
snake_case : Optional[Any] = FlaubertForTokenClassification(A )
model.to(A )
model.eval()
snake_case : Optional[int] = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlaubertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 124 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift a positive number's binary representation left by shift_amount bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift a positive number's binary representation right, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift right while replicating the sign bit (two's complement)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
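# Illustrative usage sketch (assumes the function names above; the expected
# strings follow directly from the definitions):
#
#     logical_left_shift(0b1010, 3)    # '0b1010000'  (equivalent to * 2**3)
#     logical_right_shift(0b1010, 3)   # '0b1'        (equivalent to // 2**3)
#     arithmetic_right_shift(-17, 2)   # leading '1' sign bits are replicated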
| 124 | 1 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
blip_test = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_get_test_to_tester_mapping(self):
"""simple docstring"""
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test)
        expected_bert_mapping = {'BertModelTest': 'BertModelTester'}
        expected_blip_mapping = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)
    def test_get_model_to_test_mapping(self):
"""simple docstring"""
        bert_model_test_mapping = get_model_to_test_mapping(bert_test)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test)
        expected_bert_mapping = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
        expected_blip_mapping = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)
    def test_get_model_to_tester_mapping(self):
"""simple docstring"""
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test)
        expected_bert_mapping = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
        expected_blip_mapping = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
| 353 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
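# A minimal sketch of the lazy-import pattern used above (a hypothetical,
# simplified class; the real machinery is transformers.utils._LazyModule):
# submodules are imported only when one of their attributes is first accessed,
# which keeps the top-level package import cheap.
import importlib
import types


class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # triggers the actual submodule import on first access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)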
| 192 | 0 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    def __init__(self):
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate')

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('on_predict')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step')
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader()) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('on_epoch_begin')
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log')
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save')
            expected_events.append('on_epoch_end')
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls whether a ProgressCallback or a PrinterCallback is used
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore', category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='epoch')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy='steps',
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning') as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
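# A hedged sketch of the hook API exercised above: subclass TrainerCallback,
# override any on_* method, and pass the class or an instance via
# Trainer(callbacks=[...]). The hook signature follows the transformers API;
# the early-stopping behavior itself is a made-up example.
from transformers import TrainerCallback


class StopAfterNStepsCallback(TrainerCallback):
    def __init__(self, max_steps=10):
        self.max_steps = max_steps

    def on_step_end(self, args, state, control, **kwargs):
        # ask the Trainer to stop once enough optimizer steps have run
        if state.global_step >= self.max_steps:
            control.should_training_stop = True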
| 273 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = 'big_bird'

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
| 273 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 353 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    # up-case, strip non-letters, and separate repeated letters with X's
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
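# Illustrative round-trip, assuming the function names above (the decoded text
# is the X-padded form produced by prepare_input, not the raw input):
if __name__ == "__main__":
    ciphertext = encode("Hide the gold in the tree stump", "playfair example")
    print(f"Encoded: {ciphertext}")
    print(f"Decoded: {decode(ciphertext, 'playfair example')}")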
| 95 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
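# Typical invocations of the CLI assembled above (each subcommand comes from
# one of the *_command_parser helpers registered on the subparsers):
#
#     accelerate config                     # interactive environment configuration
#     accelerate env                        # print the current accelerate setup
#     accelerate launch train.py --lr 1e-4  # run a script under the saved config
#     accelerate test                       # sanity-check the configuration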
| 159 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
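# Example invocation, using only the flags defined by the parser above (the
# script name and paths are placeholders):
#
#     python bertarize.py \
#         --pruning_method topK \
#         --threshold 0.10 \
#         --model_name_or_path ./serialization_dir/fine_pruned_model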
| 15 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
@property
def UpperCamelCase__ ( self ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
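# A hedged usage sketch: the template records which dataset columns play the
# question/context/answers roles so they can be renamed to the canonical schema
# (the column names below are invented for illustration):
template = QuestionAnsweringExtractive(
    question_column="query", context_column="passage", answers_column="answers"
)
# template.column_mapping == {"query": "question", "passage": "context", "answers": "answers"}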
| 77 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCAmelCase =logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f'Could not make batched video from {videos}')
class VivitImageProcessor(BaseImageProcessor):
'''simple docstring'''
    model_input_names = ['pixel_values']
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = PILImageResampling.BILINEAR ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
if "shortest_edge" in size:
A = get_resize_output_image_size(lowerCamelCase_ ,size["""shortest_edge"""] ,default_to_square=lowerCamelCase_ )
elif "height" in size and "width" in size:
A = (size["""height"""], size["""width"""])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
A = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowerCamelCase_ ,size=(size["""height"""], size["""width"""]) ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> List[str]:
A = image.astype(np.floataa )
if offset:
A = image - (scale / 2)
return rescale(lowerCamelCase_ ,scale=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
return normalize(lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = ChannelDimension.FIRST ,) -> np.ndarray:
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
A = to_numpy_array(lowerCamelCase_ )
if do_resize:
A = self.resize(image=lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ )
if do_center_crop:
A = self.center_crop(lowerCamelCase_ ,size=lowerCamelCase_ )
if do_rescale:
A = self.rescale(image=lowerCamelCase_ ,scale=lowerCamelCase_ ,offset=lowerCamelCase_ )
if do_normalize:
A = self.normalize(image=lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ )
A = to_channel_dimension_format(lowerCamelCase_ ,lowerCamelCase_ )
return image
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = ChannelDimension.FIRST ,**lowerCamelCase_ ,) -> PIL.Image.Image:
A = do_resize if do_resize is not None else self.do_resize
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = offset if offset is not None else self.offset
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = size if size is not None else self.size
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
A = make_batched(lowerCamelCase_ )
A = [
[
self._preprocess_image(
image=lowerCamelCase_ ,do_resize=lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ ,do_center_crop=lowerCamelCase_ ,crop_size=lowerCamelCase_ ,do_rescale=lowerCamelCase_ ,rescale_factor=lowerCamelCase_ ,offset=lowerCamelCase_ ,do_normalize=lowerCamelCase_ ,image_mean=lowerCamelCase_ ,image_std=lowerCamelCase_ ,data_format=lowerCamelCase_ ,)
for img in video
]
for video in videos
]
A = {"""pixel_values""": videos}
return BatchFeature(data=lowerCamelCase_ ,tensor_type=lowerCamelCase_ )
| 77 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps):
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : List[str] ):
return self.config.num_train_timesteps
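# Standalone sketch of the Karras et al. (2022) sigma schedule computed by
# _convert_to_karras above, in plain numpy (rho=7.0 as in the paper; the helper
# name and example values are illustrative):
import numpy as np


def karras_sigmas(sigma_min: float, sigma_max: float, n: int, rho: float = 7.0) -> np.ndarray:
    # interpolate in sigma**(1/rho) space, then map back
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho


# e.g. karras_sigmas(0.1, 10.0, 5) decreases smoothly from 10.0 down to 0.1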
| 31 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 31 | 1 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCAmelCase :str = logging.getLogger(__name__)
lowerCAmelCase :List[Any] = '''pytorch_model.bin'''
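# The three dataclasses below hold the model, data, and training arguments for the self-training loop; they are instantiated further down as STModelArguments, STDataArguments, and STTrainingArguments.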
@dataclasses.dataclass
class _lowerCamelCase :
'''simple docstring'''
A_ : str = dataclasses.field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} )
A_ : Optional[str] = dataclasses.field(
default=lowercase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )
@dataclasses.dataclass
class _lowerCamelCase :
'''simple docstring'''
A_ : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} )
A_ : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} )
A_ : Optional[str] = dataclasses.field(
default=lowercase__ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
A_ : Optional[str] = dataclasses.field(
default=lowercase__ , metadata={"""help""": """The name of the task to train on."""} , )
A_ : Optional[List[str]] = dataclasses.field(
default=lowercase__ , metadata={"""help""": """The list of labels for the task."""} )
@dataclasses.dataclass
class _lowerCamelCase :
'''simple docstring'''
A_ : str = dataclasses.field(
metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} )
A_ : Optional[str] = dataclasses.field(
default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} )
A_ : Optional[str] = dataclasses.field(
default="""no""" , metadata={
"""help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
} , )
A_ : Optional[int] = dataclasses.field(
default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
A_ : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"""help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
} , )
A_ : Optional[bool] = dataclasses.field(
default=lowercase__ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
A_ : Optional[bool] = dataclasses.field(
default=lowercase__ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
A_ : Optional[bool] = dataclasses.field(
default=lowercase__ , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
A_ : Optional[float] = dataclasses.field(
default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
A_ : Optional[int] = dataclasses.field(
default=1_00 , metadata={"""help""": """Number of self-training iterations."""} , )
A_ : Optional[int] = dataclasses.field(
default=lowercase__ , metadata={"""help""": """Random seed for initialization."""} , )
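# Build the pseudo-labeled training set for the next iteration: join the inference inputs with the model's predictions, optionally filter by confidence or by validation performance, relabel, shuffle, and write out train_pseudo.<extension>.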
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
__magic_name__ : List[Any] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__magic_name__ : List[Any] = dataset.filter(lambda lowerCAmelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__magic_name__ : Any = int(eval_result * len(lowerCAmelCase ) )
print(lowerCAmelCase )
__magic_name__ : Any = dataset.sort('probability' , reverse=lowerCAmelCase )
__magic_name__ : Dict = dataset.select(range(lowerCAmelCase ) )
__magic_name__ : Dict = dataset.remove_columns(['label', 'probability'] )
__magic_name__ : Tuple = dataset.rename_column('prediction' , 'label' )
__magic_name__ : List[Any] = dataset.map(lambda lowerCAmelCase : {"label": idalabel[example["label"]]} )
__magic_name__ : Any = dataset.shuffle(seed=args.seed )
__magic_name__ : List[str] = os.path.join(lowerCAmelCase , f'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(lowerCAmelCase , index=lowerCAmelCase )
else:
dataset.to_json(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__magic_name__ : int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Set up logging; we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__magic_name__ : List[str] = STModelArguments(model_name_or_path=lowerCAmelCase )
__magic_name__ : Dict = STDataArguments(train_file=lowerCAmelCase , infer_file=lowerCAmelCase )
__magic_name__ : int = STTrainingArguments(output_dir=lowerCAmelCase )
__magic_name__ : int = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowerCAmelCase ).items():
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
for key, value in kwargs.items():
if hasattr(lowerCAmelCase , lowerCAmelCase ):
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Sanity checks
__magic_name__ : int = {}
__magic_name__ : str = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__magic_name__ : int = args.train_file
__magic_name__ : Optional[Any] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__magic_name__ : Dict = args.eval_file
for key in data_files:
__magic_name__ : List[str] = data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], f'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__magic_name__ : List[Any] = extension
else:
assert extension == args.data_file_extension, f'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), f'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
__magic_name__ : List[Any] = f'{args.output_dir}/self-train_iter-{{}}'.format
__magic_name__ : List[Any] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
accelerator.wait_for_everyone()
__magic_name__ : Optional[int] = None
__magic_name__ : Dict = None
__magic_name__ : Any = 0
__magic_name__ : Tuple = False
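# Early-stopping state carried across self-training iterations: the best iteration seen so far, its eval result, a patience counter, and a stop flag.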
# Show the progress bar
__magic_name__ : List[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__magic_name__ : List[Any] = data_dir_format(lowerCAmelCase )
assert os.path.exists(lowerCAmelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__magic_name__ : int = os.path.join(lowerCAmelCase , 'stage-1' )
__magic_name__ : Optional[Any] = {
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowerCAmelCase , lowerCAmelCase ):
arguments_dict.update({key: value} )
__magic_name__ : Optional[int] = os.path.join(lowerCAmelCase , 'best-checkpoint' , lowerCAmelCase )
if os.path.exists(lowerCAmelCase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , lowerCAmelCase , lowerCAmelCase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , lowerCAmelCase )
finetune(**lowerCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCAmelCase )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , lowerCAmelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__magic_name__ : Any = os.path.join(lowerCAmelCase , 'best-checkpoint' )
__magic_name__ : Optional[int] = os.path.join(lowerCAmelCase , 'stage-2' )
# Update arguments_dict
__magic_name__ : Tuple = model_path
__magic_name__ : Union[str, Any] = data_files['train']
__magic_name__ : Optional[Any] = current_output_dir
__magic_name__ : int = os.path.join(lowerCAmelCase , 'best-checkpoint' , lowerCAmelCase )
if os.path.exists(lowerCAmelCase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , lowerCAmelCase , lowerCAmelCase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , lowerCAmelCase )
finetune(**lowerCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCAmelCase )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , lowerCAmelCase )
__magic_name__ : Tuple = iteration
__magic_name__ : List[str] = data_dir_format(iteration + 1 )
__magic_name__ : Any = AutoConfig.from_pretrained(os.path.join(lowerCAmelCase , 'best-checkpoint' ) )
__magic_name__ : List[Any] = config.idalabel
__magic_name__ : int = os.path.join(lowerCAmelCase , 'eval_results_best-checkpoint.json' )
__magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'test_results_best-checkpoint.json' )
assert os.path.exists(lowerCAmelCase )
with open(lowerCAmelCase , 'r' ) as f:
__magic_name__ : str = float(json.load(lowerCAmelCase )[args.eval_metric] )
__magic_name__ : Optional[Any] = os.path.join(lowerCAmelCase , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(lowerCAmelCase )
# Loading the dataset from local csv or json files.
__magic_name__ : Tuple = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
__magic_name__ : Optional[int] = load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
shutil.copy(lowerCAmelCase , os.path.join(lowerCAmelCase , f'eval_results_iter-{iteration}.json' ) )
if os.path.exists(lowerCAmelCase ):
shutil.copy(lowerCAmelCase , os.path.join(lowerCAmelCase , f'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
accelerator.wait_for_everyone()
__magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , f'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__magic_name__ : Optional[Any] = eval_result
if best_iteration is None:
__magic_name__ : Dict = new_iteration
__magic_name__ : Tuple = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__magic_name__ : List[str] = new_iteration
__magic_name__ : Dict = new_eval_result
__magic_name__ : Any = 0
else:
if new_eval_result == best_eval_result:
__magic_name__ : Union[str, Any] = new_iteration
__magic_name__ : Union[str, Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__magic_name__ : List[Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , lowerCAmelCase )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCAmelCase , f'eval_results_iter-{iteration}.json' ) , os.path.join(lowerCAmelCase , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCAmelCase , f'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(lowerCAmelCase , 'eval_results_best-iteration.json' ) , )
| 275 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : int = StableDiffusionXLImgaImgPipeline
A_ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
A_ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
A_ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
A_ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self : Dict ) -> Optional[int]:
torch.manual_seed(0 )
__magic_name__ : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=_A , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__magic_name__ : str = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
__magic_name__ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__magic_name__ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=32 , )
__magic_name__ : Dict = CLIPTextModel(_A )
__magic_name__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=_A )
__magic_name__ : Optional[Any] = CLIPTextModelWithProjection(_A )
__magic_name__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=_A )
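# SDXL pipelines carry two text encoder / tokenizer pairs, hence the *_2 entries assembled below.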
__magic_name__ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __lowerCAmelCase ( self : List[Any] , _A : List[str] , _A : Any=0 ) -> Union[str, Any]:
__magic_name__ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Dict = image / 2 + 0.5
if str(_A ).startswith('mps' ):
__magic_name__ : Any = torch.manual_seed(_A )
else:
__magic_name__ : int = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
__magic_name__ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
__magic_name__ : str = self.get_dummy_components()
__magic_name__ : Any = StableDiffusionXLImgaImgPipeline(**_A )
__magic_name__ : List[Any] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Any = self.get_dummy_inputs(_A )
__magic_name__ : Optional[int] = sd_pipe(**_A ).images
__magic_name__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ : Any = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self : List[Any] ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
pass
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ : Dict = self.get_dummy_components()
__magic_name__ : Optional[Any] = StableDiffusionXLImgaImgPipeline(**_A )
__magic_name__ : List[Any] = sd_pipe.to(_A )
__magic_name__ : str = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
# forward without prompt embeds
__magic_name__ : Union[str, Any] = self.get_dummy_inputs(_A )
__magic_name__ : Union[str, Any] = 3 * ['this is a negative prompt']
__magic_name__ : List[str] = negative_prompt
__magic_name__ : int = 3 * [inputs['prompt']]
__magic_name__ : Tuple = sd_pipe(**_A )
__magic_name__ : str = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__magic_name__ : Optional[Any] = self.get_dummy_inputs(_A )
__magic_name__ : Tuple = 3 * ['this is a negative prompt']
__magic_name__ : List[str] = 3 * [inputs.pop('prompt' )]
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) : List[Any] = sd_pipe.encode_prompt(_A , negative_prompt=_A )
__magic_name__ : Tuple = sd_pipe(
**_A , prompt_embeds=_A , negative_prompt_embeds=_A , pooled_prompt_embeds=_A , negative_pooled_prompt_embeds=_A , )
__magic_name__ : int = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : str , _A : Optional[int] , _A : Optional[Any]="cpu" , _A : List[str]=torch.floataa , _A : Any=0 ) -> str:
__magic_name__ : List[str] = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : Optional[Any] = np.random.RandomState(_A ).standard_normal((1, 4, 64, 64) )
__magic_name__ : Union[str, Any] = torch.from_numpy(_A ).to(device=_A , dtype=_A )
__magic_name__ : Optional[int] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
__magic_name__ : str = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Optional[int] = self.get_inputs(_A )
__magic_name__ : Union[str, Any] = pipe(**_A ).images
__magic_name__ : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__magic_name__ : List[Any] = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 275 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: Optional[Any] ={
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
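# Each framework-specific model class below is registered only when its backend (torch, TF, or Flax) is importable.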
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: str =['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Dict =['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: int =['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
SCREAMING_SNAKE_CASE_: Dict =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = Dict[str, Any]
UpperCamelCase = List[Prediction]
@add_end_docstrings(UpperCAmelCase_ )
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A: Any = {}
if "threshold" in kwargs:
A: List[Any] = kwargs['''threshold''']
return {}, {}, postprocess_kwargs
def __call__( self : str , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[Predictions, List[Prediction]]:
'''simple docstring'''
return super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
A: int = load_image(SCREAMING_SNAKE_CASE_ )
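# Record the original (height, width) so predicted boxes can be rescaled back to pixel space in postprocessing.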
A: Optional[Any] = torch.IntTensor([[image.height, image.width]] )
A: Union[str, Any] = self.image_processor(images=[image] , return_tensors='''pt''' )
if self.tokenizer is not None:
A: int = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
A: Any = target_size
return inputs
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]:
'''simple docstring'''
A: Tuple = model_inputs.pop('''target_size''' )
A: Tuple = self.model(**SCREAMING_SNAKE_CASE_ )
A: List[str] = outputs.__class__({'''target_size''': target_size, **outputs} )
if self.tokenizer is not None:
A: Dict = model_inputs['''bbox''']
return model_outputs
def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=0.9 ) -> Union[str, Any]:
'''simple docstring'''
A: List[Any] = model_outputs['''target_size''']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
A , A: Union[str, Any] = target_size[0].tolist()
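# LayoutLM-style models emit boxes on a normalized 0-1000 grid; scale them back to pixel coordinates.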
def unnormalize(SCREAMING_SNAKE_CASE_ : str ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
A , A: Dict = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
A: List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
A: List[str] = [unnormalize(SCREAMING_SNAKE_CASE_ ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
A: Dict = ['''score''', '''label''', '''box''']
A: Optional[int] = [dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(scores.tolist() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
A: Any = self.image_processor.post_process_object_detection(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: List[str] = raw_annotations[0]
A: List[Any] = raw_annotation['''scores''']
A: List[Any] = raw_annotation['''labels''']
A: int = raw_annotation['''boxes''']
A: Any = scores.tolist()
A: List[Any] = [self.model.config.idalabel[label.item()] for label in labels]
A: List[Any] = [self._get_bounding_box(SCREAMING_SNAKE_CASE_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
A: Tuple = ['''score''', '''label''', '''box''']
A: str = [
dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
]
return annotation
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
A , A , A , A: str = box.int().tolist()
A: str = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
| 319 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a : Dict = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 371 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a : List[Any] = logging.get_logger(__name__)
a : List[str] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
a : List[Any] = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
a : List[Any] = {
"""facebook/blenderbot_small-90M""": 5_1_2,
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = BlenderbotSmallTokenizer
def __init__( self , A=None , A=None , A="<|endoftext|>" , A="<|endoftext|>" , A="<|endoftext|>" , A=False , A=True , **A , ) -> Union[str, Any]:
super().__init__(
ByteLevelBPETokenizer(
vocab=A , merges=A , add_prefix_space=A , trim_offsets=A , ) , bos_token=A , eos_token=A , unk_token=A , **A , )
UpperCAmelCase : Optional[Any] = add_prefix_space
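# A single sequence is wrapped as <bos> tokens <eos>; a second segment, when given, is appended and closed with another <eos>.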
def _lowercase( self , A , A=None ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Any = [self.sep_token_id]
UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 338 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A : Dict = "pt"
elif is_tf_available():
A : List[Any] = "tf"
else:
A : Optional[Any] = "jax"
class _lowercase ( __a , unittest.TestCase):
"""simple docstring"""
A__ = PerceiverTokenizer
A__ = False
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
def lowerCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : List[str]=False , __lowerCamelCase : Union[str, Any]=20 , __lowerCamelCase : Union[str, Any]=5 ):
'''simple docstring'''
lowerCamelCase__ : int = []
for i in range(len(__lowerCamelCase ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Tuple = list(filter(lambda __lowerCamelCase : re.match(R"^[ a-zA-Z]+$" , t[1] ) , __lowerCamelCase ) )
lowerCamelCase__ : Dict = list(filter(lambda __lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCamelCase ) , __lowerCamelCase ) )
if max_length is not None and len(__lowerCamelCase ) > max_length:
lowerCamelCase__ : Optional[Any] = toks[:max_length]
if min_length is not None and len(__lowerCamelCase ) < min_length and len(__lowerCamelCase ) > 0:
while len(__lowerCamelCase ) < min_length:
lowerCamelCase__ : Any = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : Any = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : int = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
if " " not in output_txt and len(__lowerCamelCase ) > 1:
lowerCamelCase__ : Tuple = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCamelCase )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCamelCase )
)
if with_prefix_space:
lowerCamelCase__ : int = " " + output_txt
lowerCamelCase__ : Any = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
return output_txt, output_ids
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
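# The Perceiver tokenizer works on raw UTF-8 bytes framed by [CLS]=4 and [SEP]=5, with byte values shifted by the special-token offset (6, judging by the expected ids below).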
lowerCamelCase__ : Tuple = "Unicode €."
lowerCamelCase__ : Optional[Any] = tokenizer(__lowerCamelCase )
lowerCamelCase__ : Tuple = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] , __lowerCamelCase )
# decoding
lowerCamelCase__ : Tuple = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , "[CLS]Unicode €.[SEP]" )
lowerCamelCase__ : str = tokenizer("e è é ê ë" )
lowerCamelCase__ : Any = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , __lowerCamelCase )
# decoding
lowerCamelCase__ : List[str] = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowerCamelCase__ : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowerCamelCase__ : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertNotIn("decoder_input_ids" , __lowerCamelCase )
self.assertNotIn("decoder_attention_mask" , __lowerCamelCase )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Optional[int] = [
"Summary of the text.",
"Another summary.",
]
lowerCamelCase__ : Dict = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : List[Any] = tempfile.mkdtemp()
lowerCamelCase__ : str = " He is very happy, UNwant\u00E9d,running"
lowerCamelCase__ : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
lowerCamelCase__ : Dict = tokenizer.__class__.from_pretrained(__lowerCamelCase )
lowerCamelCase__ : Tuple = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
lowerCamelCase__ : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : str = tempfile.mkdtemp()
lowerCamelCase__ : List[Any] = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowerCamelCase__ : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowerCamelCase__ : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(__lowerCamelCase )
lowerCamelCase__ : str = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase__ : Dict = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(__lowerCamelCase )
lowerCamelCase__ : int = [f"<extra_id_{i}>" for i in range(125 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowerCamelCase__ : str = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : int = tokenizer_class.from_pretrained(
__lowerCamelCase , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Any = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=__lowerCamelCase )]
lowerCamelCase__ : int = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : str ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.get_tokenizers(fast=__lowerCamelCase , do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase__ : Optional[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
lowerCamelCase__ : Optional[int] = tokenizer.convert_tokens_to_string(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
| 184 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A =logging.get_logger(__name__)
A ={
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class _a ( __a ):
__a : Union[str, Any] = """encodec"""
def __init__( self : Tuple , lowercase : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase : Any=24_000 , lowercase : str=1 , lowercase : Optional[int]=False , lowercase : Optional[Any]=None , lowercase : str=None , lowercase : Tuple=128 , lowercase : Union[str, Any]=32 , lowercase : Union[str, Any]=1 , lowercase : Optional[Any]=[8, 5, 4, 2] , lowercase : Any="weight_norm" , lowercase : Tuple=7 , lowercase : int=7 , lowercase : Dict=3 , lowercase : List[Any]=2 , lowercase : str=True , lowercase : List[str]="reflect" , lowercase : List[Any]=2 , lowercase : Optional[Any]=2 , lowercase : int=1.0 , lowercase : Dict=1_024 , lowercase : str=None , lowercase : Union[str, Any]=True , **lowercase : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
super().__init__(**lowercase )
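# The properties below derive, from the raw config values: the chunk length and stride in samples, the frame rate, and the quantizer count implied by the largest target bandwidth.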
@property
def A ( self : Dict ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def A ( self : Optional[int] ):
'''simple docstring'''
return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 34 | 0 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __magic_name__ ( unittest.TestCase):
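# The round-trip test below runs twice via parameterized.expand: once with the default config file name and once with a custom "foo.json".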
@parameterized.expand([(None,), ("""foo.json""",)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ):
lowercase_ : Union[str, Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ , config_name=lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" )
lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowercase_ , lowercase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[int] = GenerationConfig()
lowercase_ : int = {
"""max_new_tokens""": 1024,
"""foo""": """bar""",
}
lowercase_ : List[str] = copy.deepcopy(lowercase_ )
lowercase_ : Tuple = generation_config.update(**lowercase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowercase_ , lowercase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowercase_ , {"""foo""": """bar"""} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Dict = GenerationConfig()
lowercase_ : int = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ )
assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[int] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowercase_ )
self.assertEqual(default_config.num_beams , 1 )
lowercase_ : Dict = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowercase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __magic_name__ ( unittest.TestCase):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any ):
lowercase_ : int = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
| 369 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowerCamelCase ( UpperCAmelCase__ : int ) -> int:
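# Mobius function: returns 1 for a square-free input with an even number of prime factors, -1 for an odd number, and 0 when the input is not square-free.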
lowercase_ : Any = prime_factors(UpperCAmelCase__ )
if is_square_free(lowercase_ ):
return -1 if len(lowercase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
a_ : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class snake_case ( datasets.BuilderConfig ):
"""simple docstring"""
_lowerCamelCase = None
_lowerCamelCase = "utf-8"
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = True # deprecated
_lowerCamelCase = None # deprecated
_lowerCamelCase = 10 << 20 # 10MB
_lowerCamelCase = None
class snake_case ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
_lowerCamelCase = JsonConfig
def snake_case ( self ):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
lowerCamelCase_ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowerCamelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase , (str, list, tuple) ):
lowerCamelCase_ = data_files
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [files]
lowerCamelCase_ = [dl_manager.iter_files(UpperCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
lowerCamelCase_ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [files]
lowerCamelCase_ = [dl_manager.iter_files(UpperCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCamelCase , gen_kwargs={"files": files} ) )
return splits
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowerCamelCase_ = self.config.features.arrow_schema.field(UpperCamelCase ).type
lowerCamelCase_ = pa_table.append_column(UpperCamelCase , pa.array([None] * len(UpperCamelCase ) , type=UpperCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCamelCase_ = table_cast(UpperCamelCase , self.config.features.arrow_schema )
return pa_table
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCamelCase_ = json.load(UpperCamelCase )
# We keep only the field we are interested in
lowerCamelCase_ = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(UpperCamelCase , (list, tuple) ):
lowerCamelCase_ = set().union(*[row.keys() for row in dataset] )
lowerCamelCase_ = {col: [row.get(UpperCamelCase ) for row in dataset] for col in keys}
else:
lowerCamelCase_ = dataset
lowerCamelCase_ = pa.Table.from_pydict(UpperCamelCase )
yield file_idx, self._cast_table(UpperCamelCase )
# If the file has one json object per line
else:
with open(UpperCamelCase , "rb" ) as f:
lowerCamelCase_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowerCamelCase_ = max(self.config.chunksize // 32 , 16 << 10 )
lowerCamelCase_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
lowerCamelCase_ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowerCamelCase_ = batch.decode(self.config.encoding , errors=UpperCamelCase ).encode("utf-8" )
try:
while True:
try:
lowerCamelCase_ = paj.read_json(
io.BytesIO(UpperCamelCase ) , read_options=paj.ReadOptions(block_size=UpperCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCamelCase , pa.ArrowInvalid )
and "straddling" not in str(UpperCamelCase )
or block_size > len(UpperCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'''Batch of {len(UpperCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCamelCase_ = json.load(UpperCamelCase )
except json.JSONDecodeError:
logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCamelCase )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCamelCase , UpperCamelCase ): # list is the only sequence type supported in JSON
try:
lowerCamelCase_ = set().union(*[row.keys() for row in dataset] )
lowerCamelCase_ = {col: [row.get(UpperCamelCase ) for row in dataset] for col in keys}
lowerCamelCase_ = pa.Table.from_pydict(UpperCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCamelCase )}: {e}''' )
raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(UpperCamelCase )
break
else:
logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCamelCase )}: {e}''' )
raise ValueError(
f'''Not able to read records in the JSON file at {file}. '''
f'''You should probably indicate the field of the JSON file containing your records. '''
f'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCamelCase )
batch_idx += 1
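
# Minimal usage sketch (not part of the loader; the file paths below are hypothetical).
# `load_dataset("json", ...)` dispatches to the builder above; `field` selects the
# nested key handled by the first branch of `_generate_tables`:
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl")                # one object per line
#   ds = load_dataset("json", data_files="data.json", field="rows")   # {"rows": [...]} layout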
| 55 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
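
# Sketch of the registration contract `main()` relies on (the command name and flag
# below are illustrative, not real accelerate subcommands): each `*_command_parser`
# adds a subparser and binds a `func` default, which is why `main()` can dispatch on
# `args.func` and fall back to printing help when it is missing.
def example_command_parser(subparsers=None):
    parser = subparsers.add_parser("example", help="hypothetical subcommand")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser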
| 55 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 239 |
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """AND gate: 1 only when both inputs are 1."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 239 | 1 |
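
# The tuple-count trick generalizes to other gates (sketch added for illustration;
# `or_gate` is not part of the original file): OR is "not every input is 0".
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) < 2)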
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
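
# Minimal sketch of the lazy-import pattern used above (simplified; the real
# `_LazyModule` in transformers also handles dummy objects and import errors).
# Attribute access triggers the submodule import the first time it is needed.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, name):
        submodule = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        return getattr(submodule, name)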
| 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 273 | 0 |
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """For each element, find the next greater one by index comparison. O(n^2)."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but iterating over slices. Still O(n^2)."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack solution, O(n): scan from the right, popping smaller elements."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
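
    # Sanity check against the expected output above (the upstream version
    # expresses these as doctests on each function):
    assert next_greatest_element_slow(arr) == expect
    assert next_greatest_element_fast(arr) == expect
    assert next_greatest_element(arr) == expect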
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 364 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length")
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    # tokenizer has no padding token
    def test_padding_different_model(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True)
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[str] = "bos"
_UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"]
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : List[Any] = tokenizer.encode(
__a , )
# We changed the bos token
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("./tok" )
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_UpperCamelCase : Tuple = tokenizer.encode(
__a , )
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
| 310 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = WavaVecaFeatureExtractor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json')
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-feature-extractor')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-feature-extractor-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-feature-extractor')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token)

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-feature-extractor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='test-feature-extractor', push_to_hub=True, use_auth_token=self._token)

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token)

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-feature-extractor-org', push_to_hub=True, use_auth_token=self._token)

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'})

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f'{USER}/test-dynamic-feature-extractor', trust_remote_code=True)
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor')
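
# Pattern note (added): the offline test above works because the model is first
# fetched into the local cache; with `requests.Session.request` patched to fail,
# `from_pretrained` falls back to the cached files instead of the network.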
| 71 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size)

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model({'pixel_values': pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 27 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_missing_flax_weights(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 359 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('Unsupported Group')
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('Invalid public key')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('Invalid public key')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
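
    # Usage sketch (party names are illustrative): both sides derive the same
    # shared secret from each other's public keys.
    alice, bob = DiffieHellman(), DiffieHellman()
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b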
| 107 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
def strabool(v):
    '''simple docstring'''
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('''boolean value expected''')
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    '''simple docstring'''
    # map the original in/emb/out layer names onto diffusers' ResnetBlock2D names
    new_checkpoint[F'''{new_prefix}.norm1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[F'''{new_prefix}.conv1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[F'''{new_prefix}.norm2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[F'''{new_prefix}.conv2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[F'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[F'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[F'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    '''simple docstring'''
    # split the fused qkv projection into separate q/k/v tensors
    weight_q, weight_k, weight_v = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3, dim=0)
    new_checkpoint[F'''{new_prefix}.group_norm.weight'''] = checkpoint[F'''{old_prefix}.norm.weight''']
    new_checkpoint[F'''{new_prefix}.group_norm.bias'''] = checkpoint[F'''{old_prefix}.norm.bias''']
    new_checkpoint[F'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[F'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[F'''{new_prefix}.to_out.0.bias'''] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    new_checkpoint = {}
    new_checkpoint['''time_embedding.linear_1.weight'''] = checkpoint['''time_embed.0.weight''']
    new_checkpoint['''time_embedding.linear_1.bias'''] = checkpoint['''time_embed.0.bias''']
    new_checkpoint['''time_embedding.linear_2.weight'''] = checkpoint['''time_embed.2.weight''']
    new_checkpoint['''time_embedding.linear_2.bias'''] = checkpoint['''time_embed.2.bias''']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['''class_embedding.weight'''] = checkpoint['''label_emb.weight''']
    new_checkpoint['''conv_in.weight'''] = checkpoint['''input_blocks.0.0.weight''']
    new_checkpoint['''conv_in.bias'''] = checkpoint['''input_blocks.0.0.bias''']
    down_block_types = unet_config['''down_block_types''']
    layers_per_block = unet_config['''layers_per_block''']
    attention_head_dim = unet_config['''attention_head_dim''']
    channels_list = unet_config['''block_out_channels''']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = j == 0 and downsample_block_has_skip
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = j == 0 and downsample_block_has_skip
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = F'''down_blocks.{i}.attentions.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = F'''down_blocks.{i}.downsamplers.0'''
            old_prefix = F'''input_blocks.{current_layer}.0'''
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = '''mid_block.resnets.0'''
    old_prefix = '''middle_block.0'''
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = '''mid_block.attentions.0'''
    old_prefix = '''middle_block.1'''
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = '''mid_block.resnets.1'''
    old_prefix = '''middle_block.2'''
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config['''up_block_types''']
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.1'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = F'''up_blocks.{i}.attentions.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.2'''
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint['''conv_norm_out.weight'''] = checkpoint['''out.0.weight''']
    new_checkpoint['''conv_norm_out.bias'''] = checkpoint['''out.0.bias''']
    new_checkpoint['''conv_out.weight'''] = checkpoint['''out.2.weight''']
    new_checkpoint['''conv_out.bias'''] = checkpoint['''out.2.bias''']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
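# A quick check of the `strabool` helper above. argparse's `type=bool` would
# treat any non-empty string (including "False") as truthy, which is why
# `--class_cond` is parsed as a string and converted explicitly.
assert strabool("yes") is True
assert strabool("0") is False
assert strabool(True) is True  # bools pass through unchanged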
| 72 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_speecht5'''] = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_speecht5'''] = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 143 | 0 |
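# The SpeechT5 __init__ above (and the Perceiver/Autoformer ones later) follow
# the same lazy-import pattern. A stripped-down sketch of the idea with only
# the standard library; this is not transformers' actual _LazyModule, just the
# shape of it: resolve exported symbols to their submodules on first access.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the relative import assumes this module lives inside a package
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(submodule, attr)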
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: List[str] = ["PerceiverFeatureExtractor"]
A: int = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, config, tf_checkpoint_path):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(F"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("""/""")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(F"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(F"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("""layer_with_weights"""):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("""/""".join(name))
        arrays.append(array)
    logger.info(F"Read a total of {len(arrays):,} layers")
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(F"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            """The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
            """ heads.""")
    # convert layers
    logger.info("""Converting weights...""")
    for full_name, array in zip(names, arrays):
        name = full_name.split("""/""")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("""layer_with_weights"""):
                layer_num = int(m_name.split("""-""")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["""embeddings""", """LayerNorm"""])
                    pointer = getattr(pointer, """embeddings""")
                    pointer = getattr(pointer, """LayerNorm""")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["""encoder""", """layer""", str(layer_num - 4)])
                    pointer = getattr(pointer, """encoder""")
                    pointer = getattr(pointer, """layer""")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["""pooler""", """dense"""])
                    pointer = getattr(pointer, """pooler""")
                    pointer = getattr(pointer, """dense""")
            elif m_name == "embeddings":
                trace.append("""embeddings""")
                pointer = getattr(pointer, """embeddings""")
                if layer_num == 0:
                    trace.append("""word_embeddings""")
                    pointer = getattr(pointer, """word_embeddings""")
                elif layer_num == 1:
                    trace.append("""position_embeddings""")
                    pointer = getattr(pointer, """position_embeddings""")
                elif layer_num == 2:
                    trace.append("""token_type_embeddings""")
                    pointer = getattr(pointer, """token_type_embeddings""")
                else:
                    raise ValueError(F"Unknown embedding layer with name {full_name}")
                trace.append("""weight""")
                pointer = getattr(pointer, """weight""")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["""attention""", """self"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """self""")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["""attention""", """output""", """LayerNorm"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """LayerNorm""")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["""attention""", """output""", """dense"""])
                pointer = getattr(pointer, """attention""")
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """dense""")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["""output""", """dense"""])
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """dense""")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["""output""", """LayerNorm"""])
                pointer = getattr(pointer, """output""")
                pointer = getattr(pointer, """LayerNorm""")
            elif m_name == "_key_dense":
                # attention key
                trace.append("""key""")
                pointer = getattr(pointer, """key""")
            elif m_name == "_query_dense":
                # attention query
                trace.append("""query""")
                pointer = getattr(pointer, """query""")
            elif m_name == "_value_dense":
                # attention value
                trace.append("""value""")
                pointer = getattr(pointer, """value""")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["""intermediate""", """dense"""])
                pointer = getattr(pointer, """intermediate""")
                pointer = getattr(pointer, """dense""")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("""bias""")
                pointer = getattr(pointer, """bias""")
            elif m_name in ["kernel", "gamma"]:
                trace.append("""weight""")
                pointer = getattr(pointer, """weight""")
            else:
                logger.warning(F"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = """.""".join(trace)
        if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""", trace) or re.match(
            R"""(\S+)\.attention\.output\.dense\.weight""", trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                F"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                F" {array.shape}")
        logger.info(F"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(F"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(F"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    logger.info(F"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 76 | 0 |
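# One detail of the conversion above worth isolating: TF stores dense kernels
# as (in_features, out_features), while torch.nn.Linear.weight is
# (out_features, in_features), hence the transpose for "kernel" variables.
# A self-contained numpy illustration:
import numpy as np

tf_kernel = np.zeros((768, 3072))   # TF layout: (in, out)
pt_weight = tf_kernel.transpose()   # PyTorch layout: (out, in)
assert pt_weight.shape == (3072, 768)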
'''simple docstring'''
from copy import deepcopy
class FenwickTree:
    """Fenwick (binary indexed) tree supporting point updates and prefix sums."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('''Either arr or size must be specified''')

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        # undo the in-place construction to recover the original array
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324 |
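# Usage sketch for the Fenwick tree above: O(log n) point updates and
# prefix/range sums over a small array.
def fenwick_demo() -> None:
    tree = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert tree.prefix(3) == 6      # arr[0] + arr[1] + arr[2]
    assert tree.query(1, 4) == 9    # arr[1] + arr[2] + arr[3]
    tree.add(2, 10)                 # arr[2] += 10
    assert tree.get(2) == 13
    assert tree.get_array() == [1, 2, 13, 4, 5]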
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''albert-base-v2''')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding(self):
        '''simple docstring'''
        model = FlaxAlbertModel.from_pretrained('''albert-base-v2''')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 324 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'module.blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'module.blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'module.blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'module.blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'module.blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'module.blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'module.blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'module.blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'module.blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'module.blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'module.blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'module.blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    """simple docstring"""
    ignore_keys = [
        '''module.fc.fc1.weight''',
        '''module.fc.fc1.bias''',
        '''module.fc.bn1.weight''',
        '''module.fc.bn1.bias''',
        '''module.fc.bn1.running_mean''',
        '''module.fc.bn1.running_var''',
        '''module.fc.bn1.num_batches_tracked''',
        '''module.fc.fc2.weight''',
        '''module.fc.fc2.bias''',
        '''module.fc.bn2.weight''',
        '''module.fc.bn2.bias''',
        '''module.fc.bn2.running_mean''',
        '''module.fc.bn2.running_var''',
        '''module.fc.bn2.num_batches_tracked''',
        '''module.fc.fc3.weight''',
        '''module.fc.fc3.bias''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 10_00
    repo_id = '''datasets/huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id, filename), '''r'''))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''target_encoder''']
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors='''pt''')
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]])
    else:
        expected_slice = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 153 |
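# The attention conversion above relies on splitting a fused qkv projection
# into separate query/key/value tensors with `.chunk(3, dim=0)`. In isolation:
import torch

hidden_size = 4
qkv_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)
q, k, v = qkv_weight.chunk(3, dim=0)
assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)
assert torch.equal(q, qkv_weight[:hidden_size])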
"""simple docstring"""
def lucas_lehmer_test(p: int) -> bool:
    """simple docstring"""
    if p < 2:
        raise ValueError('''p should not be less than 2!''')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11)) | 153 | 1 |
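# Lucas-Lehmer decides primality of the Mersenne number M_p = 2**p - 1: with
# s_0 = 4 and s_{i+1} = (s_i * s_i - 2) mod M_p, M_p is prime iff s_{p-2} == 0.
# Cross-checking against small known cases (2**11 - 1 = 2047 = 23 * 89):
for p, expected in [(3, True), (5, True), (7, True), (11, False), (13, True)]:
    assert lucas_lehmer_test(p) is expected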
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ : Any = logging.get_logger(__name__)
a_ : Any = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a ( BackboneConfigMixin , PretrainedConfig ):
    model_type = """nat"""

    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.0_2 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 168 |
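# In the config above, the channel dimension after the last stage doubles per
# stage: hidden_size = embed_dim * 2 ** (num_stages - 1). With the defaults
# embed_dim=64 and depths=[3, 4, 6, 5] (four stages) that is 64 * 8 = 512.
assert 64 * 2 ** (len([3, 4, 6, 5]) - 1) == 512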
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    '''simple docstring'''
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    '''simple docstring'''
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168 | 1 |
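# The closed form above is the arithmetic-series sum S_n = n/2 * (2a + (n-1)d).
# For a=1, d=1, n=10: 10/2 * (2 + 9) = 55, matching the brute-force sum.
assert sum_of_series(1, 1, 10) == sum(range(1, 11)) == 55.0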
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_autoformer'''] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 358 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
        config = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**snake_case__ )
return config
def lowercase__ ( self , snake_case__=0 , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = dict(self.forward_default_kwargs )
lowerCAmelCase : List[str] = kwargs.pop("num_inference_steps" , snake_case__ )
lowerCAmelCase : List[str] = self.dummy_sample
lowerCAmelCase : int = 0.1 * sample
lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Any = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : List[str] = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCAmelCase : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCAmelCase : List[str] = scheduler_class.from_pretrained(snake_case__ )
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCAmelCase : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = sample, sample
for t in range(snake_case__ , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : str = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self , snake_case__=0 , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = dict(self.forward_default_kwargs )
lowerCAmelCase : List[str] = kwargs.pop("num_inference_steps" , snake_case__ )
lowerCAmelCase : List[str] = self.dummy_sample
lowerCAmelCase : Optional[int] = 0.1 * sample
lowerCAmelCase : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : int = self.get_scheduler_config()
lowerCAmelCase : Any = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCAmelCase : Any = scheduler_class.from_pretrained(snake_case__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : int = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self , snake_case__=None , **snake_case__ ):
"""simple docstring"""
if scheduler is None:
lowerCAmelCase : List[str] = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : Any = scheduler_class(**snake_case__ )
lowerCAmelCase : List[str] = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : List[str] = scheduler_class(**snake_case__ )
lowerCAmelCase : int = 10
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : Any = model(snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
return sample
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = dict(self.forward_default_kwargs )
lowerCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , snake_case__ )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
lowerCAmelCase : int = self.dummy_sample
lowerCAmelCase : int = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case__ , "set_timesteps" ):
scheduler.set_timesteps(snake_case__ )
elif num_inference_steps is not None and not hasattr(snake_case__ , "set_timesteps" ):
lowerCAmelCase : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase : int = [residual + 0.2, residual + 0.15, residual + 0.10]
lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase : int = scheduler.timesteps[5]
lowerCAmelCase : str = scheduler.timesteps[6]
lowerCAmelCase : str = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCAmelCase : Dict = self.full_loop(scheduler=snake_case__ )
lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
lowerCAmelCase : List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase : Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : str = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : Union[str, Any] = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=snake_case__ )
lowerCAmelCase : str = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=snake_case__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , algorithm_type="deis" , solver_order=snake_case__ , solver_type=snake_case__ , )
def lowercase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , )
lowerCAmelCase : Any = self.full_loop(
solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , )
assert not torch.isnan(snake_case__ ).any(), "Samples have nan numbers"
def lowercase__ ( self ):
"""simple docstring"""
self.check_over_configs(lower_order_final=snake_case__ )
self.check_over_configs(lower_order_final=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=snake_case__ , time_step=0 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.full_loop()
lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.full_loop(prediction_type="v_prediction" )
lowerCAmelCase : List[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = self.scheduler_classes[0]
lowerCAmelCase : str = self.get_scheduler_config(thresholding=snake_case__ , dynamic_thresholding_ratio=0 )
lowerCAmelCase : Optional[Any] = scheduler_class(**snake_case__ )
lowerCAmelCase : Optional[Any] = 10
lowerCAmelCase : Tuple = self.dummy_model()
lowerCAmelCase : List[str] = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : Union[str, Any] = model(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
assert sample.dtype == torch.floataa
| 133 | 0 |
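# The save/reload round-trip those tests exercise, in isolation. A sketch: it
# assumes diffusers is installed, and the config values mirror the test above.
import tempfile

from diffusers import DEISMultistepScheduler


def scheduler_roundtrip_demo() -> None:
    scheduler = DEISMultistepScheduler(num_train_timesteps=1_000, solver_order=2)
    with tempfile.TemporaryDirectory() as tmpdirname:
        scheduler.save_config(tmpdirname)
        reloaded = DEISMultistepScheduler.from_pretrained(tmpdirname)
    assert reloaded.config.solver_order == scheduler.config.solver_order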
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            f""" reinstalling {pkg}.""")
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""")


def require_version(requirement, hint=None):
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"""^[\w_\-\d]+$""", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""", requirement)
        if not match:
            raise ValueError(
                """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
                f""" got {requirement}""")
        pkg, want_full = match[0]
        want_range = want_full.split(""",""")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"""^([\s!=<>]{1,2})(.+)""", w)
            if not match:
                raise ValueError(
                    """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
                    f""" but got {requirement}""")
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"""{requirement}: need one of {list(ops.keys())}, but got {op}""")
    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"""The '{requirement}' distribution was not found and is required by this application. {hint}""")
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    hint = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
    return require_version(requirement, hint)
| 301 |
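# Usage sketch for the helpers above (examples mirror the transformers
# docstrings; the tqdm lines assume tqdm is installed):
require_version("python>=3.7.0")   # interpreter version, special-cased
require_version("tqdm")            # bare name: only checks installation
require_version("tqdm>=4.42.1")    # name + range: checks installed version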
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 279 | 0 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    '''simple docstring'''
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    '''simple docstring'''
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    '''simple docstring'''
    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F"""{group_key}.""", F"""{group_key}.group.""")
        if "res_path" in key:
            key = key.replace("""res_path.""", """res_path.path.""")
        if key.endswith(""".w"""):
            key = rreplace(key, """.w""", """.weight""", 1)
        if key.endswith(""".b"""):
            key = rreplace(key, """.b""", """.bias""", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def __a ( lowerCAmelCase_ : int ,lowerCAmelCase_ : Optional[Any] ,lowerCAmelCase_ : Optional[int]=None ,lowerCAmelCase_ : List[Any]=True ) -> Tuple:
'''simple docstring'''
from dall_e import Encoder
UpperCAmelCase_= Encoder()
if os.path.exists(lowerCAmelCase_ ):
UpperCAmelCase_= torch.load(lowerCAmelCase_ )
else:
UpperCAmelCase_= torch.hub.load_state_dict_from_url(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ ,lowerCAmelCase_ ):
UpperCAmelCase_= ckpt.state_dict()
encoder.load_state_dict(lowerCAmelCase_ )
if config_path is not None:
UpperCAmelCase_= FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase_ )
else:
UpperCAmelCase_= FlavaImageCodebookConfig()
UpperCAmelCase_= FlavaImageCodebook(lowerCAmelCase_ ).eval()
UpperCAmelCase_= encoder.state_dict()
UpperCAmelCase_= upgrade_state_dict(lowerCAmelCase_ )
hf_model.load_state_dict(lowerCAmelCase_ )
UpperCAmelCase_= hf_model.state_dict()
UpperCAmelCase_= count_parameters(lowerCAmelCase_ )
UpperCAmelCase_= count_parameters(lowerCAmelCase_ )
assert torch.allclose(lowerCAmelCase_ ,lowerCAmelCase_ ,atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(lowerCAmelCase_ )
else:
return hf_state_dict
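# Added illustration (not part of the original converter): how the helpers above
# rewrite a hypothetical dall_e checkpoint key into the FLAVA naming scheme.
def _demo_key_upgrade():
    demo = {"blocks.group_1.res_path.w": torch.zeros(1)}  # made-up key, for illustration only
    upgraded = upgrade_state_dict(demo)
    # "group_1." gains ".group.", "res_path." gains ".path.", and ".w" becomes ".weight"
    assert list(upgraded) == ["blocks.group_1.group.res_path.path.weight"]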
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 277 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
}
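# Added sketch (not part of the metric file): `_compute` above delegates straight
# to NLTK, so the same number can be reproduced without loading the metric.
def _demo_gleu():
    hyp = ["the", "cat", "sat"]
    ref = ["the", "cat", "sat", "down"]
    return gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4)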
| 277 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 181 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
    if not keep_singletons:
        logger.info(
            f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            '''files, respectively''' )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": f1})

        logger.info(
            name.ljust(10), f"""Recall: {recall * 100:.2f}""", f""" Precision: {precision * 100:.2f}""", f""" F1: {f1 * 100:.2f}""", )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"""CoNLL score: {conll:.2f}""")
        output_scores.update({'''conll_score''': conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#'''):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
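# Added illustration (not from the original metric): the helper above inspects
# column 6 (index 5) of the first non-comment CoNLL line. The made-up line below
# carries a real parse bit there, so this returns True; a "-" would give False.
def _demo_gold_parse():
    line = "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Speaker * (V*) -"
    return check_gold_parse_annotation([line])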
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
| 181 | 1 |
class EditDistance:
    """
    Edit distance (Levenshtein) solved with dynamic programming, both as a
    memoized top-down recursion and as an iterative bottom-up table fill.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}""")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 370 |
"""simple docstring"""
from typing import Any
def lowercase__ ( lowercase_ ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
_UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list]
_UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} )
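# Added examples (not in the original): single mode, a tie returning every
# winner in sorted order, and the empty-input edge case.
assert mode([2, 2, 3]) == [2]
assert mode([1, 2]) == [1, 2]
assert mode([]) == []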
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
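# Added usage sketch (not part of the original file): the defaults give a 14x14
# patch grid for 224-pixel images; names follow the reconstruction above.
if __name__ == "__main__":
    config = ViTMSNConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    print(f"hidden_size={config.hidden_size}, patches per image={num_patches}")  # 768, 196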
| 94 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 94 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'''input_features''': features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''')
        if isinstance(input_features[0], list):
            padded_inputs['''input_features'''] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get('''attention_mask''')
        if attention_mask is not None:
            padded_inputs['''attention_mask'''] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['''input_features'''] = self.normalize(
                padded_inputs['''input_features'''], attention_mask=attention_mask )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
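# Added usage sketch (not part of the original module): one second of random
# noise at 16 kHz; with Kaldi's 25 ms window and 10 ms shift this yields roughly
# 98 frames of 80 mel bins. Names follow the reconstruction above.
if __name__ == "__main__":
    extractor = Speech2TextFeatureExtractor()
    rng = np.random.default_rng(0)
    speech = rng.standard_normal(16000).astype(np.float32)
    batch = extractor(speech, sampling_rate=16000, padding=True, return_tensors="np")
    print(batch["input_features"].shape)  # approximately (1, 98, 80)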
| 334 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''', '''World''')
        token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''], token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''')

        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''', '''multi-sequence build''', add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''')
            sequences = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['''input_ids''']]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 334 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
a :Tuple = [[1, 2, 4], [1, 2, 3, 4]]
a :str = DisjunctiveConstraint(_lowerCamelCase )
self.assertTrue(isinstance(dc.token_ids , _lowerCamelCase ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def SCREAMING_SNAKE_CASE__ ( self ):
# We can't have constraints that are complete subsets of another. This leads to a preverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
a :Optional[int] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(_lowerCamelCase ) # fails here
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = [[1, 2, 3], [1, 2, 4]]
a :Dict = DisjunctiveConstraint(_lowerCamelCase )
a , a , a :Union[str, Any] = dc.update(1 )
a :Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
a , a , a :Optional[Any] = dc.update(2 )
a :Union[str, Any] = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a :Union[str, Any] = dc.update(3 )
a :Optional[int] = stepped is True and completed is True and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
a :List[str] = DisjunctiveConstraint(_lowerCamelCase )
a , a , a :Tuple = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
a , a , a :Dict = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a :Dict = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
a , a , a :Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
a , a , a :List[str] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
a , a , a :Dict = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a :List[Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
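# Added usage sketch (not from the test file): disjunctive constraints are
# normally consumed by constrained beam search through `model.generate`'s
# `constraints` argument. The token ids below are placeholders, and the model
# and input_ids are assumed to come from the caller.
def _demo_constrained_generation(model, input_ids):
    constraint = DisjunctiveConstraint([[5, 11], [5, 12, 9]])
    return model.generate(input_ids, constraints=[constraint], num_beams=4)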
| 94 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum over all contiguous subarrays of `arr`."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
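# Added examples (not in the original): the classic array below has best
# subarray [4, -1, 2, 1] summing to 6; with all-negative input the
# allow_empty_subarrays switch decides between 0 and the largest element.
assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subarray_sum([-3, -1, -2]) == -1
assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0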
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
| 295 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
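# Added usage note (not part of the original __init__): after the sys.modules
# swap above, submodules load lazily on first attribute access, e.g.
#
#     from transformers.models.mvp import MvpConfig  # imports configuration_mvp only now
#     config = MvpConfig()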
| 365 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role,
            image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path},
            metric_definitions=self.env.metric_definitions, py_version='py36',
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create and fit the estimator
        estimator = self.create_estimator()
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
| 290 | 0 |