| code (string, length 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, length 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
def check_bouncy(n: int) -> bool:
    """Return True if ``n`` is bouncy: its digits are neither entirely
    non-decreasing nor entirely non-increasing."""
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments')
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers
    reaches ``percent`` (Project Euler problem 112)."""
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100')
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
| 89 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
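# DetaConfig below stores the DETA model hyper-parameters: a nested backbone
# config plus encoder/decoder sizes, deformable-attention settings, the
# Hungarian matcher costs, and the loss coefficients assigned in __init__.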
class DetaConfig( PretrainedConfig ):
model_type = """deta"""
attribute_map = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self, lowerCamelCase=None, lowerCamelCase=9_00, lowerCamelCase=20_48, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=True, lowerCamelCase=3_00, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, **lowerCamelCase, ) -> Any:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
_lowercase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
else:
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Dict = backbone_config.pop('model_type')
_lowercase : int = CONFIG_MAPPING[backbone_model_type]
_lowercase : Union[str, Any] = config_class.from_dict(lowerCamelCase)
_lowercase : Union[str, Any] = backbone_config
_lowercase : Any = num_queries
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Union[str, Any] = d_model
_lowercase : Optional[int] = encoder_ffn_dim
_lowercase : Optional[int] = encoder_layers
_lowercase : Optional[Any] = encoder_attention_heads
_lowercase : Optional[Any] = decoder_ffn_dim
_lowercase : Dict = decoder_layers
_lowercase : Tuple = decoder_attention_heads
_lowercase : Union[str, Any] = dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : int = activation_dropout
_lowercase : Tuple = activation_function
_lowercase : List[Any] = init_std
_lowercase : Union[str, Any] = init_xavier_std
_lowercase : int = encoder_layerdrop
_lowercase : Optional[int] = auxiliary_loss
_lowercase : Dict = position_embedding_type
# deformable attributes
_lowercase : Any = num_feature_levels
_lowercase : str = encoder_n_points
_lowercase : Any = decoder_n_points
_lowercase : List[str] = two_stage
_lowercase : Dict = two_stage_num_proposals
_lowercase : Any = with_box_refine
_lowercase : List[Any] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
_lowercase : List[Any] = class_cost
_lowercase : Optional[int] = bbox_cost
_lowercase : str = giou_cost
# Loss coefficients
_lowercase : Optional[int] = mask_loss_coefficient
_lowercase : int = dice_loss_coefficient
_lowercase : List[Any] = bbox_loss_coefficient
_lowercase : Optional[Any] = giou_loss_coefficient
_lowercase : str = eos_coefficient
_lowercase : int = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.d_model
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = copy.deepcopy(self.__dict__)
_lowercase : Optional[int] = self.backbone_config.to_dict()
_lowercase : Optional[Any] = self.__class__.model_type
return output
| 89 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSpeechSeq2Seq,
TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
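# Tests below cover the TF generation utilities: tf_top_k_top_p_filtering and
# end-to-end generate() behaviour (saved-model export, tokenizer-in-graph,
# eos handling, and custom encoder/model subclasses).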
@require_tf
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : str = tf.convert_to_tensor(
[
[
8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0
-0.5_6_2_0_0_4_4,
5.2_3_2_2_9_7_5_2,
4.0_3_8_6_3_9_3,
-6.8_7_9_8_3_7_8,
-0.5_4_7_8_5_8_0_2,
-3.2_0_1_2_1_5_3,
2.9_2_7_7_7_1_7_6,
1.8_8_1_7_1_9_5_3,
7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9
8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10
-9.8_5_7_1_1_8_3_6,
-5.9_6_2_0_9_2_3_6,
-1.1_3_0_3_9_1_6_1,
-7.1_1_1_5_2_9_4,
-0.8_3_6_9_6_3_3,
-5.3_1_8_6_4_0_8,
7.0_6_4_2_7_4_0_7,
0.8_1_3_6_9_3_4_4,
-0.8_2_0_2_3_8_1_7,
-5.9_1_7_9_7_9_6,
0.5_8_8_1_3_4_4_3,
-6.9_9_7_7_8_4_3_8,
4.7_1_5_5_1_1_8_9,
-0.1_8_7_7_1_6_3_7,
7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25
9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26
2.1_2_6_6_2_9_4_1,
-9.3_2_5_6_2_0_3_8,
2.3_5_6_5_2_5_2_2,
], # cumulative prob of 5 highest values <= 0.6
[
0.5_8_4_2_5_5_1_8,
4.5_3_1_3_9_2_3_8,
-5.5_7_5_1_0_4_6_4,
-6.2_8_0_3_0_6_9_9,
-7.1_9_5_2_9_5_0_3,
-4.0_2_1_2_2_5_5_1,
1.3_9_3_3_7_0_3_7,
-6.0_6_7_0_7_0_5_7,
1.5_9_4_8_0_5_1_7,
-9.6_4_3_1_1_9,
0.0_3_9_0_7_7_9_9,
0.6_7_2_3_1_7_6_2,
-8.8_8_2_0_6_7_2_6,
6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 13
2.2_8_5_2_0_7_2_3,
4.8_2_7_6_7_5_0_6,
4.3_0_4_2_1_3_6_8,
8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17
5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18
-4.4_7_3_5_7_9_4,
7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20
-2.9_1_0_5_1_6_6_3,
2.6_1_9_4_6_0_7_7,
-2.5_6_7_4_7_6_2,
-9.4_8_9_5_9_3_0_2,
-4.0_2_9_2_2_6_4_5,
-1.3_5_4_1_6_9_1_8,
9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 27
-5.8_9_4_7_8_5_5_3,
1.8_5_3_7_0_4_6_7,
], # cumulative prob of 5 highest values <= 0.6
], dtype=tf.float32, )
_lowercase : List[Any] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32, ) # expected non filtered idx as noted above
_lowercase : Optional[Any] = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023], dtype=tf.float32, ) # expected non filtered values as noted above
_lowercase : Tuple = tf_top_k_top_p_filtering(lowerCamelCase, top_k=10, top_p=0.6, min_tokens_to_keep=4)
_lowercase : Optional[Any] = output[output != -float('inf')]
_lowercase : Tuple = tf.cast(
tf.where(tf.not_equal(lowerCamelCase, tf.constant(-float('inf'), dtype=tf.float32))), dtype=tf.int32, )
tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-12)
tf.debugging.assert_equal(lowerCamelCase, lowerCamelCase)
@require_tf
class TFGenerationIntegrationTests( unittest.TestCase, GenerationIntegrationTestsMixin ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
framework_dependent_parameters = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[int] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
_lowercase : Any = 2
_lowercase : Any = 2
class DummyModel( tf.Module ):
def __init__( self, lowerCamelCase) -> Dict:
"""simple docstring"""
super(lowerCamelCase, self).__init__()
_lowercase : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length), tf.int32, name='input_ids'),
tf.TensorSpec((None, input_length), tf.int32, name='attention_mask'),
), jit_compile=lowerCamelCase, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Any = self.model.generate(
input_ids=lowerCamelCase, attention_mask=lowerCamelCase, max_new_tokens=lowerCamelCase, return_dict_in_generate=lowerCamelCase, )
return {"sequences": outputs["sequences"]}
_lowercase : Optional[Any] = [[2, 0], [1_02, 1_03]]
_lowercase : Any = [[1, 0], [1, 1]]
_lowercase : Any = DummyModel(model=lowerCamelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowerCamelCase, lowerCamelCase, signatures={'serving_default': dummy_model.serving})
_lowercase : Optional[Any] = tf.saved_model.load(lowerCamelCase).signatures['serving_default']
for batch_size in range(1, len(lowerCamelCase) + 1):
_lowercase : List[Any] = {
'input_ids': tf.constant(dummy_input_ids[:batch_size]),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size]),
}
_lowercase : Optional[Any] = serving_func(**lowerCamelCase)['sequences']
_lowercase : Optional[int] = test_model.generate(**lowerCamelCase, max_new_tokens=lowerCamelCase)
tf.debugging.assert_equal(lowerCamelCase, lowerCamelCase)
@slow
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Optional[int] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
_lowercase : List[Any] = 1
_lowercase : List[str] = 2
class DummyModel( tf.Module ):
def __init__( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
super(lowerCamelCase, self).__init__()
_lowercase : str = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None), tf.int32, name='input_ids'),
tf.TensorSpec((batch_size, None), tf.int32, name='attention_mask'),
), jit_compile=lowerCamelCase, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model.generate(
input_ids=lowerCamelCase, attention_mask=lowerCamelCase, max_new_tokens=lowerCamelCase, return_dict_in_generate=lowerCamelCase, )
return {"sequences": outputs["sequences"]}
_lowercase : List[str] = [[2], [1_02, 1_03]]
_lowercase : str = [[1], [1, 1]]
_lowercase : int = DummyModel(model=lowerCamelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowerCamelCase, lowerCamelCase, signatures={'serving_default': dummy_model.serving})
_lowercase : Optional[int] = tf.saved_model.load(lowerCamelCase).signatures['serving_default']
for input_row in range(len(lowerCamelCase)):
_lowercase : Dict = {
'input_ids': tf.constant([dummy_input_ids[input_row]]),
'attention_mask': tf.constant([dummy_attention_masks[input_row]]),
}
_lowercase : str = serving_func(**lowerCamelCase)['sequences']
_lowercase : Optional[Any] = test_model.generate(**lowerCamelCase, max_new_tokens=lowerCamelCase)
tf.debugging.assert_equal(lowerCamelCase, lowerCamelCase)
@slow
@require_tensorflow_text
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small', filename='spiece.model', local_dir=lowerCamelCase)
class CompleteSentenceTransformer( tf.keras.layers.Layer ):
def __init__( self) -> Optional[Any]:
"""simple docstring"""
super().__init__()
_lowercase : Union[str, Any] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(lowerCamelCase, 'spiece.model'), 'rb').read())
_lowercase : Tuple = TFAutoModelForSeq2SeqLM.from_pretrained('hf-internal-testing/tiny-random-t5')
def UpperCamelCase ( self, lowerCamelCase, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
_lowercase : int = self.tokenizer.tokenize(lowerCamelCase)
_lowercase , _lowercase : str = text.pad_model_inputs(
lowerCamelCase, max_seq_length=64, pad_value=self.model.config.pad_token_id)
_lowercase : int = self.model.generate(input_ids=lowerCamelCase, attention_mask=lowerCamelCase)
return self.tokenizer.detokenize(lowerCamelCase)
_lowercase : int = CompleteSentenceTransformer()
_lowercase : Tuple = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name='inputs')
_lowercase : Dict = complete_model(lowerCamelCase)
_lowercase : Tuple = tf.keras.Model(lowerCamelCase, lowerCamelCase)
keras_model.save(lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[int] = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
_lowercase : Union[str, Any] = 14
_lowercase : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
_lowercase : Optional[int] = 'Hello, my dog is cute and'
_lowercase : List[Any] = tokenizer(lowerCamelCase, return_tensors='tf')
_lowercase : int = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
_lowercase : Optional[int] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0'):
tf.random.set_seed(0)
_lowercase : Any = model.generate(**lowerCamelCase, eos_token_id=lowerCamelCase, **lowerCamelCase)
self.assertTrue(expectation == len(generated_tokens[0]))
_lowercase : Any = [6_38, 1_98]
with tf.device(':/CPU:0'):
tf.random.set_seed(0)
_lowercase : List[Any] = model.generate(**lowerCamelCase, eos_token_id=lowerCamelCase, **lowerCamelCase)
self.assertTrue(expectation == len(generated_tokens[0]))
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart')
_lowercase : Union[str, Any] = 'Hugging Face is a technology company based in New York and Paris.'
_lowercase : Tuple = bart_tokenizer(lowerCamelCase, return_tensors='tf').input_ids
_lowercase : Optional[int] = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart')
_lowercase : List[Any] = bart_model.generate(lowerCamelCase).numpy()
class FakeBart( TFBartForConditionalGeneration ):
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
return super().call(lowerCamelCase, **lowerCamelCase)
_lowercase : Union[str, Any] = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart')
_lowercase : Tuple = bart_model.generate(lowerCamelCase, foo='bar').numpy()
self.assertTrue(np.array_equal(lowerCamelCase, lowerCamelCase))
class FakeEncoder( bart_model.model.encoder.__class__ ):
def UpperCamelCase ( self, lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
return super().call(lowerCamelCase, **lowerCamelCase)
_lowercase : Any = FakeEncoder(bart_model.config, bart_model.model.shared)
_lowercase : List[str] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
_lowercase : List[Any] = bart_model.generate(lowerCamelCase).numpy()
with self.assertRaises(lowerCamelCase):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(lowerCamelCase, foo='bar')
| 89 |
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    # Rectified linear unit: element-wise max(0, x).
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 89 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
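# RealmConfig below groups the shared BERT-style encoder settings with the
# reader (span selection) and retrieval (block records, searcher beam) options.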
class RealmConfig( PretrainedConfig ):
model_type = """realm"""
def __init__( self, lowerCamelCase=3_05_22, lowerCamelCase=7_68, lowerCamelCase=1_28, lowerCamelCase=12, lowerCamelCase=12, lowerCamelCase=8, lowerCamelCase=30_72, lowerCamelCase="gelu_new", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=1E-12, lowerCamelCase=2_56, lowerCamelCase=10, lowerCamelCase=1E-3, lowerCamelCase=5, lowerCamelCase=3_20, lowerCamelCase=13_35_37_18, lowerCamelCase=50_00, lowerCamelCase=1, lowerCamelCase=0, lowerCamelCase=2, **lowerCamelCase, ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, eos_token_id=lowerCamelCase, **lowerCamelCase)
# Common config
_lowercase : Tuple = vocab_size
_lowercase : int = max_position_embeddings
_lowercase : Optional[Any] = hidden_size
_lowercase : str = retriever_proj_size
_lowercase : Any = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : int = num_candidates
_lowercase : str = intermediate_size
_lowercase : str = hidden_act
_lowercase : int = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Tuple = initializer_range
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : str = layer_norm_eps
# Reader config
_lowercase : Optional[int] = span_hidden_size
_lowercase : Optional[Any] = max_span_width
_lowercase : Any = reader_layer_norm_eps
_lowercase : Tuple = reader_beam_size
_lowercase : Tuple = reader_seq_len
# Retrieval config
_lowercase : Tuple = num_block_records
_lowercase : Tuple = searcher_beam_size
| 89 |
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 89 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode( Generic[T, U] ):
def __init__( self, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Optional[int] = key
_lowercase : Union[str, Any] = val
_lowercase : DoubleLinkedListNode[T, U] | None = None
_lowercase : DoubleLinkedListNode[T, U] | None = None
def __repr__( self) -> str:
"""simple docstring"""
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next)}, has prev: {bool(self.prev)}'''
)
class DoubleLinkedList( Generic[T, U] ):
def __init__( self) -> None:
"""simple docstring"""
_lowercase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowerCamelCase, lowerCamelCase)
_lowercase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowerCamelCase, lowerCamelCase)
_lowercase , _lowercase : Tuple = self.rear, self.head
def __repr__( self) -> str:
"""simple docstring"""
_lowercase : int = ['DoubleLinkedList']
_lowercase : Optional[int] = self.head
while node.next is not None:
rep.append(str(lowerCamelCase))
_lowercase : int = node.next
rep.append(str(self.rear))
return ",\n ".join(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> None:
"""simple docstring"""
_lowercase : List[Any] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_lowercase : Union[str, Any] = node
_lowercase : Dict = previous
_lowercase : Union[str, Any] = node
_lowercase : Union[str, Any] = self.rear
def UpperCamelCase ( self, lowerCamelCase) -> DoubleLinkedListNode[T, U] | None:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
_lowercase : Union[str, Any] = node.next
_lowercase : Union[str, Any] = node.prev
_lowercase : Tuple = None
_lowercase : int = None
return node
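# LRUCache below combines the doubly linked list with a dict: a hit moves the
# node to the rear (most recently used); inserts evict from the head when full.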
class LRUCache( Generic[T, U] ):
decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
_lowercase : DoubleLinkedList[T, U] = DoubleLinkedList()
_lowercase : Optional[Any] = capacity
_lowercase : Optional[Any] = 0
_lowercase : str = 0
_lowercase : Optional[Any] = 0
_lowercase : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self) -> str:
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self, lowerCamelCase) -> bool:
"""simple docstring"""
return key in self.cache
def UpperCamelCase ( self, lowerCamelCase) -> U | None:
"""simple docstring"""
if key in self.cache:
self.hits += 1
_lowercase : DoubleLinkedListNode[T, U] = self.cache[key]
_lowercase : List[Any] = self.list.remove(self.cache[key])
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowerCamelCase)
return node.val
self.miss += 1
return None
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_lowercase : List[str] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowerCamelCase) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
_lowercase : Union[str, Any] = DoubleLinkedListNode(lowerCamelCase, lowerCamelCase)
self.list.add(self.cache[key])
self.num_keys += 1
else:
# bump node to the end of the list, update value
_lowercase : int = self.list.remove(self.cache[key])
assert node is not None # node guaranteed to be in list
_lowercase : str = value
self.list.add(lowerCamelCase)
@classmethod
def UpperCamelCase ( cls, lowerCamelCase = 1_28) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(lowerCamelCase) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase) -> U:
if func not in cls.decorator_function_to_instance_map:
_lowercase : int = LRUCache(lowerCamelCase)
_lowercase : Optional[int] = cls.decorator_function_to_instance_map[func].get(args[0])
if result is None:
_lowercase : List[Any] = func(*lowerCamelCase)
cls.decorator_function_to_instance_map[func].put(args[0], lowerCamelCase)
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowerCamelCase, 'cache_info', lowerCamelCase) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (fibonacci(2) == 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler problem 25: index of the first ``n``-digit Fibonacci number."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 89 | 1 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how many times ``term`` occurs in ``document`` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans('', '', string.punctuation)).replace('\n', '')
    tokenize_document = document_without_punctuation.split(' ')  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing ``term``, total number of documents),
    treating each line of ``corpus`` as one document."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('', '', string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('\n')
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Return idf = log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError('log10(0) is undefined.')
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError('df must be > 0')
    elif n == 0:
        raise ValueError('log10(0) is undefined.')
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine a term frequency and an inverse document frequency score."""
    return round(tf * idf, 3)
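# A minimal usage sketch of the functions above; the corpus string and the
# counts in the comments are illustrative assumptions, not from the source.
if __name__ == "__main__":
    corpus = 'the cat sat\nthe dog sat\nthe bird flew'
    tf = term_frequency('the', 'the cat sat on the mat')  # 2 occurrences
    df, n = document_frequency('the', corpus)  # (3, 3): term appears in every doc
    idf = inverse_document_frequency(df, n, smoothing=True)  # 1 + log10(3 / 4)
    print(tf_idf(tf, idf))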
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
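# Lazy import structure: the heavy vision and torch submodules are only pulled
# in on first attribute access via _LazyModule (or eagerly under TYPE_CHECKING).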
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
SCREAMING_SNAKE_CASE : Optional[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 1 |
from __future__ import annotations

from random import random


# Treap: a randomized binary search tree. Each node carries a random priority;
# `split` and `merge` keep the tree ordered as a BST by value and heap-ordered
# by priority, which keeps the expected depth logarithmic.
class Node:
    def __init__(self, value=None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value) -> tuple[Node | None, Node | None]:
    """Split the treap into two trees: values < value and values >= value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in `left` precedes those in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("goodbye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 89 |
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the Casimir force equation F = (ℏ * c * π² * A) / (240 * d⁴) for
    whichever of the three arguments is passed as 0."""
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
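# Worked example (illustrative values, assumed rather than from the source):
# casimir_force(force=0, area=4e-4, distance=1e-6) solves for the force on
# 4 cm^2 plates 1 µm apart, roughly 5.2e-7 N from F = ℏcπ²A / (240 d⁴).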
| 89 | 1 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
SCREAMING_SNAKE_CASE : int = "src/transformers"
SCREAMING_SNAKE_CASE : Optional[int] = "docs/source/en/tasks"
def _find_text_in_file( filename , start_prompt , end_prompt ) -> Tuple:
with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowercase : Dict = f.readlines()
# Find the start prompt.
_lowercase : Optional[Any] = 0
while not lines[start_index].startswith(start_prompt ):
start_index += 1
start_index += 1
_lowercase : Union[str, Any] = start_index
while not lines[end_index].startswith(end_prompt ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task( task_guide ) -> Union[str, Any]:
_lowercase : int = TASK_GUIDE_TO_MODELS[task_guide]
_lowercase : int = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowerCamelCase_ , set() )
_lowercase : str = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide , overwrite=False ) -> Any:
_lowercase , _lowercase , _lowercase , _lowercase : Any = _find_text_in_file(
filename=os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
_lowercase : int = get_model_list_for_task(lowerCamelCase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
' to fix this.' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 89 |
def multiplicative_persistence(num: int) -> int:
    """Return the number of steps needed to reduce ``num`` to a single digit
    by repeatedly multiplying its digits."""
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return the number of steps needed to reduce ``num`` to a single digit
    by repeatedly summing its digits."""
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 89 | 1 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler problem 99: return the 1-based line number in the
    base,exponent file whose pair has the greatest value, comparing
    exponent * log10(base) instead of the huge powers themselves."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(',')))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
| 89 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
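# The helpers below convert original torch-hub DETR checkpoints to the
# HuggingFace format: build a config, rename state-dict keys, split the fused
# query/key/value projections, then verify outputs against the original model.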
def get_detr_config( model_name ):
# initialize config
if "resnet-50" in model_name:
_lowercase : Union[str, Any] = ResNetConfig.from_pretrained('microsoft/resnet-50' )
elif "resnet-101" in model_name:
_lowercase : Optional[Any] = ResNetConfig.from_pretrained('microsoft/resnet-101' )
else:
raise ValueError('Model name should include either resnet50 or resnet101' )
_lowercase : Tuple = DetrConfig(use_timm_backbone=lowerCamelCase_ , backbone_config=lowerCamelCase_ )
# set label attributes
_lowercase : Any = 'panoptic' in model_name
if is_panoptic:
_lowercase : List[Any] = 250
else:
_lowercase : str = 91
_lowercase : List[Any] = 'huggingface/label-files'
_lowercase : Any = 'coco-detection-id2label.json'
_lowercase : Tuple = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : int = idalabel
_lowercase : Any = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def create_rename_keys( config ) -> Any:
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowercase : List[str] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key( state_dict , old , new ) -> None:
val = state_dict.pop(old)
state_dict[new] = val
def read_in_q_k_v( state_dict , is_panoptic=False ) -> None:
_lowercase : Any = ''
if is_panoptic:
_lowercase : Optional[Any] = 'detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowercase : int = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase : Tuple = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : List[str] = in_proj_weight[:256, :]
_lowercase : Tuple = in_proj_bias[:256]
_lowercase : List[Any] = in_proj_weight[256:512, :]
_lowercase : Any = in_proj_bias[256:512]
_lowercase : int = in_proj_weight[-256:, :]
_lowercase : Optional[int] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowercase : str = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase : Optional[int] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : Union[str, Any] = in_proj_weight[:256, :]
_lowercase : Dict = in_proj_bias[:256]
_lowercase : Tuple = in_proj_weight[256:512, :]
_lowercase : Dict = in_proj_bias[256:512]
_lowercase : str = in_proj_weight[-256:, :]
_lowercase : Optional[int] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowercase : Tuple = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
_lowercase : Dict = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowercase : List[str] = in_proj_weight_cross_attn[:256, :]
_lowercase : Tuple = in_proj_bias_cross_attn[:256]
_lowercase : str = in_proj_weight_cross_attn[256:512, :]
_lowercase : Union[str, Any] = in_proj_bias_cross_attn[256:512]
_lowercase : List[Any] = in_proj_weight_cross_attn[-256:, :]
_lowercase : Dict = in_proj_bias_cross_attn[-256:]
def prepare_img():
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_detr_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
_lowercase , _lowercase : int = get_detr_config(lowerCamelCase_ )
# load original model from torch hub
_lowercase : int = {
'detr-resnet-50': 'detr_resnet50',
'detr-resnet-101': 'detr_resnet101',
}
logger.info(F'''Converting model {model_name}...''' )
_lowercase : Optional[Any] = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=lowerCamelCase_ ).eval()
_lowercase : str = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(lowerCamelCase_ ):
if is_panoptic:
_lowercase : str = 'detr.' + src
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase_ , is_panoptic=lowerCamelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowercase : List[Any] = 'detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
_lowercase : Tuple = state_dict.pop(lowerCamelCase_ )
_lowercase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
_lowercase : Optional[Any] = state_dict.pop(lowerCamelCase_ )
_lowercase : Union[str, Any] = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : List[str] = val
# finally, create HuggingFace model and load state dict
_lowercase : Optional[Any] = DetrForSegmentation(lowerCamelCase_ ) if is_panoptic else DetrForObjectDetection(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
# verify our conversion on an image
_lowercase : str = 'coco_panoptic' if is_panoptic else 'coco_detection'
_lowercase : Optional[int] = DetrImageProcessor(format=lowerCamelCase_ )
_lowercase : str = processor(images=prepare_img() , return_tensors='pt' )
_lowercase : Tuple = encoding['pixel_values']
_lowercase : int = detr(lowerCamelCase_ )
_lowercase : Tuple = model(lowerCamelCase_ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
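# Wav2Vec2Processor below wraps a feature extractor (audio) and a CTC tokenizer
# (text): __call__ routes the `audio` input to the former and `text` to the latter.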
class Wav2Vec2Processor( ProcessorMixin ):
feature_extractor_class = """Wav2Vec2FeatureExtractor"""
tokenizer_class = """AutoTokenizer"""
def __init__( self, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
super().__init__(lowerCamelCase, lowerCamelCase)
_lowercase : Any = self.feature_extractor
_lowercase : Dict = False
@classmethod
def UpperCamelCase ( cls, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
try:
return super().from_pretrained(lowerCamelCase, **lowerCamelCase)
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ', lowerCamelCase, )
_lowercase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase, **lowerCamelCase)
_lowercase : int = WavaVecaCTCTokenizer.from_pretrained(lowerCamelCase, **lowerCamelCase)
return cls(feature_extractor=lowerCamelCase, tokenizer=lowerCamelCase)
def __call__( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase, **lowerCamelCase)
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
_lowercase : Any = kwargs.pop('raw_speech')
else:
_lowercase : Union[str, Any] = kwargs.pop('audio', lowerCamelCase)
_lowercase : int = kwargs.pop('sampling_rate', lowerCamelCase)
_lowercase : Optional[int] = kwargs.pop('text', lowerCamelCase)
if len(lowerCamelCase) > 0:
_lowercase : Optional[int] = args[0]
_lowercase : Union[str, Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.')
if audio is not None:
_lowercase : Union[str, Any] = self.feature_extractor(lowerCamelCase, *lowerCamelCase, sampling_rate=lowerCamelCase, **lowerCamelCase)
if text is not None:
_lowercase : List[str] = self.tokenizer(lowerCamelCase, **lowerCamelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowercase : Any = encodings['input_ids']
return inputs
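# When both audio and text are provided, the tokenizer's input_ids are returned
# alongside the extracted audio features so they can be used directly as CTC
# labels when fine-tuning a Wav2Vec2 model.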
def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCamelCase, **lowerCamelCase)
_lowercase : Optional[int] = kwargs.pop('input_features', lowerCamelCase)
_lowercase : Dict = kwargs.pop('labels', lowerCamelCase)
if len(lowerCamelCase) > 0:
_lowercase : List[str] = args[0]
_lowercase : List[Any] = args[1:]
if input_features is not None:
_lowercase : Optional[int] = self.feature_extractor.pad(lowerCamelCase, *lowerCamelCase, **lowerCamelCase)
if labels is not None:
_lowercase : Dict = self.tokenizer.pad(lowerCamelCase, **lowerCamelCase)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_lowercase : Any = labels['input_ids']
return input_features
def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase)
@contextmanager
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call).')
_lowercase : Optional[int] = True
_lowercase : int = self.tokenizer
yield
_lowercase : Any = self.feature_extractor
_lowercase : List[Any] = False
| 89 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE : str = "scheduler_config.json"
class _lowerCamelCase( _a ):
lowercase_ : Any = 1
lowercase_ : Dict = 2
lowercase_ : Union[str, Any] = 3
lowercase_ : Tuple = 4
lowercase_ : Optional[Any] = 5
@dataclass
class _lowerCamelCase( _a ):
lowercase_ : jnp.ndarray
class _lowerCamelCase:
lowercase_ : Union[str, Any] = SCHEDULER_CONFIG_NAME
lowercase_ : str = ["""dtype"""]
lowercase_ : Dict = []
lowercase_ : int = True
@classmethod
def UpperCamelCase ( cls, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
_lowercase , _lowercase : Optional[int] = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase, subfolder=lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase, )
_lowercase , _lowercase : Tuple = cls.from_config(lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase)
if hasattr(lowerCamelCase, 'create_state') and getattr(lowerCamelCase, 'has_state', lowerCamelCase):
_lowercase : List[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, **lowerCamelCase) -> Any:
"""simple docstring"""
self.save_config(save_directory=lowerCamelCase, push_to_hub=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def UpperCamelCase ( cls) -> Any:
"""simple docstring"""
_lowercase : Any = list(set([cls.__name__] + cls._compatibles))
_lowercase : Dict = importlib.import_module(__name__.split('.')[0])
_lowercase : Any = [
getattr(lowerCamelCase, lowerCamelCase) for c in compatible_classes_str if hasattr(lowerCamelCase, lowerCamelCase)
]
return compatible_classes
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> jnp.ndarray:
assert len(lowerCamelCase_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCamelCase_ ) - x.ndim) ) , lowerCamelCase_ )
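# The helper above appends trailing singleton axes before broadcasting, e.g. a
# per-sample coefficient of shape (batch,) is reshaped to (batch, 1, 1, 1) so it
# can scale a batch of images of shape (batch, channels, height, width).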
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=0.9_99 , lowerCamelCase_=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(time_step ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
_lowercase : List[Any] = []
for i in range(lowerCamelCase_ ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
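# This is the "squaredcos_cap_v2" (cosine) schedule: with
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, each beta is
# min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta), so the betas
# stay bounded while the cumulative alpha product decays smoothly from ~1 to ~0.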
@flax.struct.dataclass
class _lowerCamelCase:
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
@classmethod
def UpperCamelCase ( cls, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : int = scheduler.config
if config.trained_betas is not None:
_lowercase : str = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
elif config.beta_schedule == "linear":
_lowercase : List[Any] = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Dict = (
jnp.linspace(
config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Optional[int] = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''')
_lowercase : List[str] = 1.0 - betas
_lowercase : Union[str, Any] = jnp.cumprod(lowerCamelCase, axis=0)
return cls(
alphas=lowerCamelCase, betas=lowerCamelCase, alphas_cumprod=lowerCamelCase, )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : str = state.alphas_cumprod
_lowercase : str = alphas_cumprod[timesteps] ** 0.5
_lowercase : Optional[Any] = sqrt_alpha_prod.flatten()
_lowercase : Tuple = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
_lowercase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowercase : Optional[Any] = sqrt_one_minus_alpha_prod.flatten()
_lowercase : int = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase , _lowercase : Optional[int] = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
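# This is the closed-form forward diffusion step q(x_t | x_0):
# noisy_samples = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.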
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_lowercase , _lowercase : Tuple = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
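# This is the v-prediction target from Salimans & Ho (2022):
# velocity = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0.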
| 89 | 1 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class _lowerCamelCase( _a ):
lowercase_ : List[Any] = ["""input_values""", """padding_mask"""]
def __init__( self, lowerCamelCase = 1, lowerCamelCase = 2_40_00, lowerCamelCase = 0.0, lowerCamelCase = None, lowerCamelCase = None, **lowerCamelCase, ) -> Any:
"""simple docstring"""
super().__init__(feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, **lowerCamelCase)
_lowercase : int = chunk_length_s
_lowercase : int = overlap
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length))
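# Example: with chunk_length_s = 1.0, sampling_rate = 24000 and overlap = 0.5,
# chunk_length is 24000 samples and chunk_stride is 12000, so consecutive
# chunks overlap by half their length.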
def __call__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''')
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.')
elif padding is None:
# by default let's pad the inputs
_lowercase : Tuple = True
_lowercase : Tuple = bool(
isinstance(lowerCamelCase, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))))
if is_batched:
_lowercase : Tuple = [np.asarray(lowerCamelCase, dtype=np.floataa).T for audio in raw_audio]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray):
_lowercase : List[str] = np.asarray(lowerCamelCase, dtype=np.floataa)
elif isinstance(lowerCamelCase, np.ndarray) and raw_audio.dtype is np.dtype(np.floataa):
_lowercase : Union[str, Any] = raw_audio.astype(np.floataa)
# always return batch
if not is_batched:
_lowercase : str = [np.asarray(lowerCamelCase).T]
# verify inputs are valid
for idx, example in enumerate(lowerCamelCase):
if example.ndim > 2:
raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''')
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''')
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''')
_lowercase : str = None
_lowercase : Optional[int] = BatchFeature({'input_values': raw_audio})
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_lowercase : str = min(array.shape[0] for array in raw_audio)
_lowercase : Tuple = int(np.floor(max_length / self.chunk_stride))
_lowercase : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_lowercase : Dict = max(array.shape[0] for array in raw_audio)
_lowercase : Optional[int] = int(np.ceil(max_length / self.chunk_stride))
_lowercase : List[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
_lowercase : Tuple = 'max_length'
else:
_lowercase : int = input_values
# normal padding on batch
if padded_inputs is None:
_lowercase : int = self.pad(
lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase, padding=lowerCamelCase, return_attention_mask=lowerCamelCase, )
if padding:
_lowercase : Optional[Any] = padded_inputs.pop('attention_mask')
_lowercase : Union[str, Any] = []
for example in padded_inputs.pop('input_values'):
if self.feature_size == 1:
_lowercase : Tuple = example[..., None]
input_values.append(example.T)
_lowercase : Union[str, Any] = input_values
if return_tensors is not None:
_lowercase : str = padded_inputs.convert_to_tensors(lowerCamelCase)
return padded_inputs
| 89 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> float:
if not nums:
raise ValueError('List is empty' )
return sum(lowerCamelCase_ ) / len(lowerCamelCase_ )
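# Example: for [1, 2, 3] this returns 2.0; an empty list raises
# ValueError('List is empty').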
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Dict = inspect.getfile(accelerate.test_utils)
_lowercase : str = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
_lowercase : Dict = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])
@require_tpu
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Any = F'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
_lowercase : Any = [sys.executable] + distributed_args
execute_subprocess_async(lowerCamelCase, env=os.environ.copy())
| 89 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase_( ) -> List[Any]:
_lowercase : int = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
_lowercase : Optional[Any] = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
_lowercase : Any = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
_lowercase : Optional[int] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 89 | 1 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True ) -> Optional[int]:
model.train()
_lowercase : List[Any] = model(lowerCamelCase_ )
_lowercase : str = F.mse_loss(lowerCamelCase_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=False ) -> Tuple:
set_seed(42 )
_lowercase : List[str] = RegressionModel()
_lowercase : Union[str, Any] = deepcopy(lowerCamelCase_ )
_lowercase : str = RegressionDataset(length=80 )
_lowercase : Dict = DataLoader(lowerCamelCase_ , batch_size=16 )
model.to(accelerator.device )
if sched:
_lowercase : int = AdamW(params=model.parameters() , lr=1e-3 )
_lowercase : Any = AdamW(params=ddp_model.parameters() , lr=1e-3 )
_lowercase : Optional[int] = LambdaLR(lowerCamelCase_ , lr_lambda=lambda lowerCamelCase_ : epoch**0.65 )
_lowercase : int = LambdaLR(lowerCamelCase_ , lr_lambda=lambda lowerCamelCase_ : epoch**0.65 )
# Wrap the DDP copy (and dataloader/optimizer/scheduler when present) with the accelerator
if sched:
_lowercase , _lowercase , _lowercase , _lowercase : int = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
_lowercase , _lowercase : str = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
# Test that on a single CPU or GPU the context manager does nothing
_lowercase , _lowercase , _lowercase : Optional[int] = get_training_setup(lowerCamelCase_ )
# Use a single batch
_lowercase , _lowercase : int = next(iter(lowerCamelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_lowercase , _lowercase : Optional[int] = accelerator.gather((ddp_input, ddp_target) )
_lowercase , _lowercase : Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase_ ):
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
# Sync grads
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_lowercase : str = ddp_input[torch.randperm(len(lowerCamelCase_ ) )]
def UpperCamelCase_( lowerCamelCase_ ) -> str:
# Test that the context manager behaves properly in a distributed setup
_lowercase , _lowercase , _lowercase : str = get_training_setup(lowerCamelCase_ )
# Use a single batch
_lowercase , _lowercase : Tuple = next(iter(lowerCamelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_lowercase , _lowercase : Any = accelerator.gather((ddp_input, ddp_target) )
_lowercase , _lowercase : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase_ ):
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
# Sync grads
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_lowercase : Tuple = ddp_input[torch.randperm(len(lowerCamelCase_ ) )]
def UpperCamelCase_( lowerCamelCase_=False , lowerCamelCase_=False ) -> Optional[Any]:
_lowercase : Dict = Accelerator(
split_batches=lowerCamelCase_ , dispatch_batches=lowerCamelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_lowercase , _lowercase , _lowercase : Optional[Any] = get_training_setup(lowerCamelCase_ )
for iteration, batch in enumerate(lowerCamelCase_ ):
_lowercase , _lowercase : Any = batch.values()
# Gather the distributed inputs and targs for the base model
_lowercase , _lowercase : int = accelerator.gather((ddp_input, ddp_target) )
_lowercase , _lowercase : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCamelCase_ ):
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_lowercase : List[str] = ddp_input[torch.randperm(len(lowerCamelCase_ ) )]
GradientState._reset_state()
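# With gradient_accumulation_steps=2, accelerator.accumulate() skips the DDP
# gradient all-reduce on intermediate micro-batches and synchronizes on every
# second batch (and on the final batch of the dataloader), which is exactly the
# pattern the allclose assertions above verify.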
def UpperCamelCase_( lowerCamelCase_=False , lowerCamelCase_=False ) -> Dict:
_lowercase : Optional[int] = Accelerator(
split_batches=lowerCamelCase_ , dispatch_batches=lowerCamelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = get_training_setup(lowerCamelCase_ , lowerCamelCase_ )
for iteration, batch in enumerate(lowerCamelCase_ ):
_lowercase , _lowercase : Any = batch.values()
# Gather the distributed inputs and targs for the base model
_lowercase , _lowercase : Any = accelerator.gather((ddp_input, ddp_target) )
_lowercase , _lowercase : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCamelCase_ ):
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
_lowercase : List[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase_ ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def UpperCamelCase_( ) -> Dict:
_lowercase : Any = Accelerator()
_lowercase : Dict = RegressionDataset(length=80 )
_lowercase : Any = DataLoader(lowerCamelCase_ , batch_size=16 )
_lowercase : Union[str, Any] = RegressionDataset(length=96 )
_lowercase : Optional[Any] = DataLoader(lowerCamelCase_ , batch_size=16 )
_lowercase , _lowercase : Tuple = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCamelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase_ )
if iteration < len(lowerCamelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCamelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase_ )
if batch_num < len(lowerCamelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def UpperCamelCase_( ) -> Tuple:
_lowercase : List[str] = Accelerator()
_lowercase : List[Any] = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(lowerCamelCase_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(lowerCamelCase_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(lowerCamelCase_ , lowerCamelCase_ )
# Currently breaks on torch 2.0+; need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 89 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Optional[int] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase ( self, lowerCamelCase=0) -> str:
"""simple docstring"""
_lowercase : Optional[int] = np.random.RandomState(lowerCamelCase)
_lowercase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : Tuple = pipe(**lowerCamelCase).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Any = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_dummy_inputs()
_lowercase : Any = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
_lowercase : int = self.get_dummy_inputs()
_lowercase : Union[str, Any] = 3 * [inputs.pop('prompt')]
_lowercase : Union[str, Any] = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Tuple = text_inputs['input_ids']
_lowercase : Any = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
_lowercase : List[Any] = prompt_embeds
# forward
_lowercase : Union[str, Any] = pipe(**lowerCamelCase)
_lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = self.get_dummy_inputs()
_lowercase : Any = 3 * ['this is a negative prompt']
_lowercase : str = negative_prompt
_lowercase : Optional[int] = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : str = output.images[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : str = 3 * [inputs.pop('prompt')]
_lowercase : Optional[int] = []
for p in [prompt, negative_prompt]:
_lowercase : Tuple = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Dict = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
_lowercase , _lowercase : str = embeds
# forward
_lowercase : Dict = pipe(**lowerCamelCase)
_lowercase : Tuple = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
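# ONNX Runtime accepts execution providers either as plain strings or as
# (name, options) tuples; the options above cap the CUDA memory arena at 15 GB
# and grow it only by the requested amount ("kSameAsRequested").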
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : int = ort.SessionOptions()
_lowercase : str = False
return options
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0)
_lowercase : Union[str, Any] = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : str = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'open neural network exchange'
_lowercase : List[Any] = np.random.RandomState(0)
_lowercase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = 'open neural network exchange'
_lowercase : str = np.random.RandomState(0)
_lowercase : Dict = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : List[Any] = 0
def test_callback_fn(lowerCamelCase, lowerCamelCase, lowerCamelCase) -> None:
_lowercase : List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_lowercase : Any = latents[0, -3:, -3:, -1]
_lowercase : Tuple = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_lowercase : List[Any] = latents[0, -3:, -3:, -1]
_lowercase : str = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
_lowercase : Any = False
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = 'Andromeda galaxy in a bottle'
_lowercase : str = np.random.RandomState(0)
pipe(
prompt=lowerCamelCase, num_inference_steps=5, guidance_scale=7.5, generator=lowerCamelCase, callback=lowerCamelCase, callback_steps=1, )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
assert isinstance(lowerCamelCase, lowerCamelCase)
assert pipe.safety_checker is None
_lowercase : Optional[int] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase)
_lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowercase : List[str] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
| 89 | 1 |
class _lowerCamelCase:
def __init__( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = n
_lowercase : Tuple = [None] * self.n
_lowercase : int = 0 # index of the first element
_lowercase : Any = 0
_lowercase : Optional[int] = 0
def __len__( self) -> int:
"""simple docstring"""
return self.size
def UpperCamelCase ( self) -> bool:
"""simple docstring"""
return self.size == 0
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return False if self.is_empty() else self.array[self.front]
def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
if self.size >= self.n:
raise Exception('QUEUE IS FULL')
_lowercase : List[str] = data
_lowercase : Tuple = (self.rear + 1) % self.n
self.size += 1
return self
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
if self.size == 0:
raise Exception('UNDERFLOW')
_lowercase : Union[str, Any] = self.array[self.front]
_lowercase : List[str] = None
_lowercase : Optional[int] = (self.front + 1) % self.n
self.size -= 1
return temp
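# A minimal usage sketch; enqueue/dequeue/first are illustrative names for the
# operations defined above, not the identifiers used in this class:
# q = CircularQueue(3)
# q.enqueue(10).enqueue(20) # enqueue returns self, so calls chain
# q.first() # -> 10
# q.dequeue() # -> 10; the front index advances modulo n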
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : List[Any] = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = ["PoolFormerFeatureExtractor"]
SCREAMING_SNAKE_CASE : List[Any] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 89 | 1 |
from __future__ import annotations
SCREAMING_SNAKE_CASE : str = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase) -> None:
"""simple docstring"""
_lowercase : Union[str, Any] = graph
# mapping node to its parent in resulting breadth first tree
_lowercase : dict[str, str | None] = {}
_lowercase : Optional[int] = source_vertex
def UpperCamelCase ( self) -> None:
"""simple docstring"""
_lowercase : Tuple = {self.source_vertex}
_lowercase : Tuple = None
_lowercase : Tuple = [self.source_vertex] # first in first out queue
while queue:
_lowercase : Optional[int] = queue.pop(0)
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase)
_lowercase : Tuple = vertex
queue.append(lowerCamelCase)
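# Because BFS visits vertices in nondecreasing distance from the source, each
# parent pointer recorded above lies on a fewest-edges path; shortest_path
# reconstructs such a path by following parents back to the source vertex.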
def UpperCamelCase ( self, lowerCamelCase) -> str:
"""simple docstring"""
if target_vertex == self.source_vertex:
return self.source_vertex
_lowercase : Tuple = self.parent.get(lowerCamelCase)
if target_vertex_parent is None:
_lowercase : Union[str, Any] = (
F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(lowerCamelCase)
return self.shortest_path(lowerCamelCase) + F'''->{target_vertex}'''
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 89 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
SCREAMING_SNAKE_CASE : int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
SCREAMING_SNAKE_CASE : Dict = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print("\n".join(upper_files) + "\n")
SCREAMING_SNAKE_CASE : List[Any] = [file for file in filepaths if " " in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print("\n".join(space_files) + "\n")
SCREAMING_SNAKE_CASE : Any = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print("\n".join(hyphen_files) + "\n")
SCREAMING_SNAKE_CASE : str = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print("\n".join(nodir_files) + "\n")
SCREAMING_SNAKE_CASE : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 89 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
SCREAMING_SNAKE_CASE : Optional[Any] = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Any:
_lowercase : str = 10
_lowercase : List[str] = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_lowercase : Union[str, Any] = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(lowerCamelCase_ ) ),
} , features=lowerCamelCase_ , )
return dataset
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : int = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE : str = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'file.txt'
_lowercase : List[str] = FILE_CONTENT
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
import bza
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with bza.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_lowercase : Optional[int] = bytes(lowerCamelCase_ , 'utf-8' )
with gzip.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with lza.frame.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowerCamelCase_ , 'w' ) as archive:
archive.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
import tarfile
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
import lzma
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_lowercase : int = bytes(lowerCamelCase_ , 'utf-8' )
with lzma.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
import zipfile
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_lowercase : Dict = bytes(lowerCamelCase_ , 'utf-8' )
with zstd.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
_lowercase : Optional[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
SCREAMING_SNAKE_CASE : Optional[Any] = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
SCREAMING_SNAKE_CASE : Tuple = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
SCREAMING_SNAKE_CASE : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[int] = datasets.Dataset.from_dict(lowerCamelCase_ )
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowerCamelCase_ ) ) as con:
_lowercase : Union[str, Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : Tuple = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : str = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
import bza
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCamelCase_ , 'rb' ) as f:
_lowercase : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
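# The 'main_dir' arcname above nests both CSV members inside a directory entry,
# so archive loading can be exercised against zips with an internal folder level.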
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        } )
    with open(path , 'wb' ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
return path
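# As an aside (not in the original), the Parquet file written above can be read
# back with pq.read_table(path) to check that it round-trips the DATA rows.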
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    data = {'data': DATA}
    with open(path , 'w' ) as f:
        json.dump(data , f )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path , 'w' ) as f:
        json.dump(data , f )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA_312:
            f.write(json.dumps(item ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA_STR:
            f.write(json.dumps(item ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
import gzip
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
    with open(lowerCamelCase_ , 'rb' ) as orig_file:
        with gzip.open(path , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
import gzip
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
    with open(lowerCamelCase_ , 'rb' ) as orig_file:
        with gzip.open(path , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
    path = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
    path = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
    path = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
    with tarfile.TarFile(path , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
    path = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
    with open(path , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
    with open(path , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
    with open(path , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
    path = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
    path = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(text )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> int:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
    path = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
    data_dir = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
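# The hidden '.test.txt' file and '.subdir' directory above let data-file
# resolution tests verify that dotfiles and dot-directories are skipped by default.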
| 89 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data ) -> tuple:
    return (data["data"], data["target"])
def xgboost(features , target , test_features ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main() -> None:
    california = fetch_california_housing()
    data , target = data_handling(california )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(y_test , predictions )}''' )
    print(F'''Mean Square Error : {mean_squared_error(y_test , predictions )}''' )
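    # Optional aside (not in the original script): np.sqrt(mean_squared_error(...))
    # would report the RMSE in the same units as the house-price target.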
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 89 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
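# The TYPE_CHECKING branch below re-imports the same names eagerly so static type
# checkers can see them, while at runtime the module object is replaced by a
# _LazyModule that resolves attributes from _import_structure on first access.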
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
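# MAPPING keys are fairseq parameter-path fragments and values are the matching
# Hugging Face module paths; the '*' is filled in with the encoder layer index.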
SCREAMING_SNAKE_CASE : Optional[int] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights_wav2vec2(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
            load_adapter(name , value , adapter , unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def load_adapter(full_name , value , adapter , unused_weights ):
    name = full_name.split('adaptor.' )[-1]
    items = name.split('.' )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
                logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
                logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
                logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(layer_id , int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ):
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path , add_adapter=add_adapter , adapter_stride=adapter_stride , adapter_kernel_size=adapter_kernel_size , use_auth_token=True , output_hidden_size=encoder_output_dim , )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/' )[:-1] ),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        } , )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path , use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config )
    recursively_load_weights_wav2vec2(model.encoder , hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wav2vec.config.tie_word_embeddings = False
    tokenizer = MBart50Tokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wav2vec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'mbart50'
    config['feature_extractor_type'] = 'wav2vec2'
    config['decoder_start_token_id'] = tokenizer.eos_token_id
    config['forced_bos_token_id'] = 25_0004
    config['forced_eos_token_id'] = tokenizer.eos_token_id
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
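    # Hypothetical example invocation (paths are placeholders, not from the source):
    #   python convert_wav2vec2_mbart.py --checkpoint_path ./checkpoint_best.pt \
    #       --dict_path ./dict.mbart50.txt --config_yaml_path ./config.yaml \
    #       --pytorch_dump_folder_path ./wav2vec2-mbart50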
| 89 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_sequence_label_size=2, initializer_range=0.0_2, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )
    def create_and_check_xlm_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """simple docstring"""
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """simple docstring"""
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """simple docstring"""
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """simple docstring"""
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss , ) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss , ) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """simple docstring"""
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
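    # Note: XLM's common test inputs carry sequence lengths via `lengths` rather
    # than an `attention_mask`, matching the model's forward signature.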
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        """simple docstring"""
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)
    def test_xlm_lm_head(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)
    def test_xlm_simple_qa(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)
    def test_xlm_qa(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)
    def test_xlm_sequence_classif(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)
    def test_xlm_token_classif(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)
    def test_xlm_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        """simple docstring"""
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))
    def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        """simple docstring"""
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), )
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase ):
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        """simple docstring"""
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 4_47]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 89 | 1 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name , checkpoint_name , dump_path , force_download ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
else:
            checkpoint_names = [checkpoint_name]
logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
logger.info(F'''=> removing {file_name}''' )
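            # Only the consolidated tokenizer.json is kept; the legacy slow-tokenizer
            # files that save_pretrained also emits are removed on the spot.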
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 89 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}, )
    max_seq_length: int = field(
        default=10_24, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."} )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."} )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."} )
    def __post_init__(self) -> None:
        """simple docstring"""
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
        else:
            train_extension = self.train_file.split('.')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
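# HfArgumentParser (used in main below) turns the dataclasses above into a single
# CLI: every field becomes a --flag (e.g. --dataset_name, --max_seq_length), or
# the whole configuration can be passed as one .json file argument.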
def UpperCamelCase_( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.' )[-1]
                test_extension = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
        if data_args.train_file.endswith('.csv' ):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset('csv' , data_files=data_files , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset('json' , data_files=data_files , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'Refused': 0, 'Entailed': 1}
    model.config.id2label = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            _table_content = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result['label'] = examples['label']
        return result
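    # table_text arrives as newline-separated rows of '#'-delimited cells; the
    # first row becomes the DataFrame header that TapexTokenizer then linearizes.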
with training_args.main_process_first(desc='dataset map pre-processing' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowercase : Any = raw_datasets['train']
if data_args.max_train_samples is not None:
_lowercase : str = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowercase : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowercase : List[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_lowercase : Optional[int] = raw_datasets['test']
if data_args.max_predict_samples is not None:
_lowercase : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCamelCase_ ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary mapping metric names (strings) to floats.
def compute_metrics(lowerCamelCase_ ):
_lowercase : Dict = p.predictions[0] if isinstance(p.predictions , lowerCamelCase_ ) else p.predictions
_lowercase : Tuple = np.argmax(lowerCamelCase_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowercase : Any = default_data_collator
    elif training_args.fp16:
_lowercase : str = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 )
else:
_lowercase : Optional[Any] = None
# Initialize our Trainer
_lowercase : List[str] = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase_ , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
_lowercase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
_lowercase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowercase : Optional[Any] = last_checkpoint
_lowercase : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
_lowercase : List[Any] = train_result.metrics
_lowercase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
_lowercase : int = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCamelCase_ )
trainer.save_metrics('train' , lowerCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowercase : Tuple = trainer.evaluate(eval_dataset=lowerCamelCase_ )
_lowercase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
_lowercase : Optional[int] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('eval' , lowerCamelCase_ )
trainer.save_metrics('eval' , lowerCamelCase_ )
if training_args.do_predict:
logger.info('*** Predict ***' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
_lowercase : Any = predict_dataset.remove_columns('label' )
_lowercase : Optional[Any] = trainer.predict(lowerCamelCase_ , metric_key_prefix='predict' ).predictions
_lowercase : Union[str, Any] = np.argmax(lowerCamelCase_ , axis=1 )
_lowercase : Dict = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(lowerCamelCase_ ):
_lowercase : List[str] = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
_lowercase : str = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 89 | 1 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=2, lowerCamelCase=99, lowerCamelCase=0, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase="last", lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=0, ) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : List[str] = seq_length
_lowercase : int = is_training
_lowercase : List[str] = use_input_lengths
_lowercase : int = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : Union[str, Any] = gelu_activation
_lowercase : List[str] = sinusoidal_embeddings
_lowercase : str = causal
_lowercase : Optional[int] = asm
_lowercase : Union[str, Any] = n_langs
_lowercase : List[Any] = vocab_size
_lowercase : Any = n_special
_lowercase : Any = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : List[str] = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : int = num_labels
_lowercase : Optional[int] = num_choices
_lowercase : Optional[Any] = summary_type
_lowercase : Optional[Any] = use_proj
_lowercase : int = scope
_lowercase : List[Any] = bos_token_id
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowercase : int = None
if self.use_input_lengths:
_lowercase : Dict = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
_lowercase : Tuple = None
_lowercase : int = None
_lowercase : int = None
if self.use_labels:
_lowercase : str = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase : Dict = ids_tensor([self.batch_size], 2).float()
_lowercase : Tuple = ids_tensor([self.batch_size], self.num_choices)
_lowercase : Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = XLMModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : str = model(lowerCamelCase, lengths=lowerCamelCase, langs=lowerCamelCase)
_lowercase : int = model(lowerCamelCase, langs=lowerCamelCase)
_lowercase : Any = model(lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[Any]:
"""simple docstring"""
_lowercase : Dict = XLMWithLMHeadModel(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> str:
"""simple docstring"""
_lowercase : Tuple = XLMForQuestionAnsweringSimple(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(lowerCamelCase)
_lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
_lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = XLMForQuestionAnswering(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase)
_lowercase : List[Any] = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, p_mask=lowerCamelCase, )
_lowercase : List[str] = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, )
((_lowercase) , ) : Optional[Any] = result_with_labels.to_tuple()
_lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
((_lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, ())
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = XLMForSequenceClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
_lowercase : Optional[int] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.num_labels
_lowercase : str = XLMForTokenClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Dict:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_choices
_lowercase : Optional[int] = XLMForMultipleChoice(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : int = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Optional[Any] = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : List[str] = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Dict = self.prepare_config_and_inputs()
        (
            _lowercase ,
            _lowercase ,
            _lowercase ,
            _lowercase ,
            _lowercase ,
            _lowercase ,
            _lowercase ,
            _lowercase ,
            _lowercase ,
        ) : Optional[Any] = config_and_inputs
_lowercase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, _a, unittest.TestCase ):
lowercase_ : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ : Optional[int] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase_ : Union[str, Any] = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=False) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_lowercase : Any = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase)
_lowercase : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase)
return inputs_dict
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Union[str, Any] = XLMModelTester(self)
_lowercase : List[str] = ConfigTester(self, config_class=lowerCamelCase, emb_dim=37)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> int:
"""simple docstring"""
self.assertIsInstance(lowerCamelCase, lowerCamelCase)
self.assertListEqual(
[isinstance(lowerCamelCase, lowerCamelCase) for iter_attentions in attentions], [True] * len(lowerCamelCase))
self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(lowerCamelCase):
# adds PAD dummy token
_lowercase : Dict = min_length + idx + 1
_lowercase : int = min_length + idx + 1
_lowercase : Dict = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(lowerCamelCase))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> List[Any]:
"""simple docstring"""
self.assertIsInstance(lowerCamelCase, lowerCamelCase)
self.assertListEqual(
[isinstance(lowerCamelCase, lowerCamelCase) for iter_hidden_states in hidden_states], [True] * len(lowerCamelCase), )
self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(lowerCamelCase):
# adds PAD dummy token
_lowercase : int = min_length + idx + 1
_lowercase : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(lowerCamelCase), )
pass
@slow
def UpperCamelCase ( self) -> int:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Dict = XLMModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor([[14, 4_47]], dtype=torch.long, device=lowerCamelCase) # the president
_lowercase : Any = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_lowercase : str = model.generate(lowerCamelCase, do_sample=lowerCamelCase)
self.assertListEqual(output_ids[0].cpu().numpy().tolist(), lowerCamelCase)
| 89 |
from maths.prime_factors import prime_factors
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : str = F'''Input value of [number={number}] must be an integer'''
raise TypeError(lowerCamelCase_ )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(lowerCamelCase_ ) ) % 2 else 1
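# The value returned above is the Liouville function lambda(n): -1 when n has an odd
# number of prime factors counted with multiplicity, +1 otherwise. A minimal
# trial-division sketch of the imported helper (the real maths.prime_factors may differ):
def _prime_factors_sketch(n: int) -> list:
    factors = []
    divisor = 2
    while divisor * divisor <= n:
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:
        factors.append(n)
    return factors
# e.g. _prime_factors_sketch(12) == [2, 2, 3], an odd count, so the function above returns -1.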
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
SCREAMING_SNAKE_CASE : int = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 |
from __future__ import annotations
from typing import Any
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 0) -> None:
"""simple docstring"""
_lowercase , _lowercase : str = row, column
_lowercase : Any = [[default_value for c in range(lowerCamelCase)] for r in range(lowerCamelCase)]
def __str__( self) -> str:
"""simple docstring"""
_lowercase : Tuple = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
_lowercase : str = 0
for row_vector in self.array:
for obj in row_vector:
_lowercase : Optional[int] = max(lowerCamelCase, len(str(lowerCamelCase)))
_lowercase : List[str] = F'''%{max_element_length}s'''
# Make string and return
def single_line(lowerCamelCase) -> str:
nonlocal string_format_identifier
_lowercase : Union[str, Any] = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(lowerCamelCase) for row_vector in self.array)
return s
def __repr__( self) -> str:
"""simple docstring"""
return str(self)
def UpperCamelCase ( self, lowerCamelCase) -> bool:
"""simple docstring"""
if not (isinstance(lowerCamelCase, (list, tuple)) and len(lowerCamelCase) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self, lowerCamelCase) -> Any:
"""simple docstring"""
assert self.validate_indicies(lowerCamelCase)
return self.array[loc[0]][loc[1]]
def __setitem__( self, lowerCamelCase, lowerCamelCase) -> None:
"""simple docstring"""
assert self.validate_indicies(lowerCamelCase)
_lowercase : Optional[Any] = value
def __add__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
assert isinstance(lowerCamelCase, lowerCamelCase)
assert self.row == another.row and self.column == another.column
# Add
_lowercase : Any = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : int = self[r, c] + another[r, c]
return result
def __neg__( self) -> Matrix:
"""simple docstring"""
_lowercase : List[Any] = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : List[str] = -self[r, c]
return result
def __sub__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
if isinstance(lowerCamelCase, (int, float)): # Scalar multiplication
_lowercase : Dict = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : Union[str, Any] = self[r, c] * another
return result
elif isinstance(lowerCamelCase, lowerCamelCase): # Matrix multiplication
assert self.column == another.row
_lowercase : str = Matrix(self.row, another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_lowercase : Tuple = F'''Unsupported type given for another ({type(lowerCamelCase)})'''
raise TypeError(lowerCamelCase)
def UpperCamelCase ( self) -> Matrix:
"""simple docstring"""
_lowercase : List[Any] = Matrix(self.column, self.row)
for r in range(self.row):
for c in range(self.column):
_lowercase : Union[str, Any] = self[r, c]
return result
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
assert isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_lowercase : Dict = v.transpose()
_lowercase : Any = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
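    # The identity used above (with `self` assumed to already hold A^(-1)) is the
    # Sherman-Morrison formula:
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u)
    # so the update needs only matrix products, never a fresh inversion.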
# Testing
if __name__ == "__main__":
def UpperCamelCase_( ) -> None:
# a^(-1)
_lowercase : Optional[int] = Matrix(3 , 3 , 0 )
for i in range(3 ):
            ainv[i, i] = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
_lowercase : Dict = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
_lowercase : List[Any] = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
        print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )
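        # Sanity check on the numbers above: ainv is the 3x3 identity, so the denominator
        # is 1 + v^T u = 1 + (4*1 - 2*2 + 5*(-3)) = -14 and the printed matrix is I + (u v^T) / 14.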
def UpperCamelCase_( ) -> None:
import doctest
doctest.testmod()
testa()
| 89 | 1 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if n == 1 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return 0
elif n == 2:
return 1
else:
_lowercase : List[str] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Tuple = 0
_lowercase : List[str] = 2
while digits < n:
index += 1
_lowercase : Optional[int] = len(str(fibonacci(lowerCamelCase_ ) ) )
return index
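# A closed-form alternative (a sketch, not part of the original): by Binet's formula
# F(i) ~ phi**i / sqrt(5), so the first index whose Fibonacci number has n digits
# satisfies i * log10(phi) - log10(5) / 2 >= n - 1.
def _fibonacci_digits_index_closed_form(n: int) -> int:
    import math

    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))
# e.g. _fibonacci_digits_index_closed_form(1000) == 4782, matching the loop above.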
def UpperCamelCase_( lowerCamelCase_ = 1000 ) -> int:
return fibonacci_digits_index(lowerCamelCase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 89 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = int(lowerCamelCase_ )
_lowercase , _lowercase , _lowercase : Optional[Any] = t // 3600, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
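# e.g. format_time(3661) -> '1:01:01' and format_time(75) -> '01:15'.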
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=300 ) -> Dict:
# docstyle-ignore
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : int = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_lowercase : Any = F'''{elt:.6f}''' if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else str(lowerCamelCase_ )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
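# e.g. text_to_html_table([["Step", "Training Loss"], [500, 0.123456789]]) renders a header
# row plus one body row, with the float formatted to six decimal places ("0.123457").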
class _lowerCamelCase:
lowercase_ : str = 5
lowercase_ : str = 0.2
def __init__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = 3_00, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = total
_lowercase : Optional[int] = '' if prefix is None else prefix
_lowercase : Tuple = leave
_lowercase : str = parent
_lowercase : str = width
_lowercase : List[Any] = None
_lowercase : List[str] = None
_lowercase : Tuple = None
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, lowerCamelCase = None) -> Dict:
"""simple docstring"""
_lowercase : Any = value
if comment is not None:
_lowercase : Union[str, Any] = comment
if self.last_value is None:
_lowercase : Dict = time.time()
_lowercase : Tuple = value
_lowercase : str = None
_lowercase : Optional[int] = self.warmup
_lowercase : Optional[Any] = 1
self.update_bar(lowerCamelCase)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
if self.first_calls > 0:
self.first_calls -= 1
_lowercase : List[str] = time.time()
_lowercase : Tuple = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_lowercase : Dict = self.elapsed_time / (value - self.start_value)
else:
_lowercase : int = None
if value >= self.total:
_lowercase : Dict = self.total
_lowercase : List[str] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_lowercase : Optional[int] = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCamelCase)
_lowercase : int = value
_lowercase : Tuple = current_time
if self.average_time_per_item is None:
_lowercase : str = 1
else:
_lowercase : int = max(int(self.update_every / self.average_time_per_item), 1)
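        # i.e. the redraw throttle adapts so the bar updates roughly once every
        # `self.update_every` seconds, however fast or slow individual items are.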
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[Any] = ' ' * (len(str(self.total)) - len(str(lowerCamelCase))) + str(lowerCamelCase)
if self.elapsed_time is None:
_lowercase : int = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
_lowercase : Union[str, Any] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
else:
_lowercase : Union[str, Any] = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
F''' {format_time(self.predicted_remaining)}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]'''
self.display()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_lowercase : Optional[Any] = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(''))
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase=None) -> int:
"""simple docstring"""
super().__init__(lowerCamelCase)
_lowercase : Optional[Any] = None if column_names is None else [column_names]
_lowercase : Any = None
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_lowercase : Dict = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
if self.inner_table is None:
_lowercase : Dict = [list(values.keys()), list(values.values())]
else:
_lowercase : Tuple = self.inner_table[0]
if len(self.inner_table) == 1:
                # We give the column names a chance to be updated at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCamelCase)
_lowercase : str = columns
self.inner_table.append([values[c] for c in columns])
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=3_00) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[str] = NotebookProgressBar(lowerCamelCase, prefix=lowerCamelCase, parent=self, width=lowerCamelCase)
return self.child_bar
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[Any] = None
self.display()
class _lowerCamelCase( _a ):
def __init__( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = None
_lowercase : Dict = None
_lowercase : Dict = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Dict = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
_lowercase : Dict = 0
_lowercase : Tuple = 0
_lowercase : int = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss')
_lowercase : Union[str, Any] = NotebookTrainingTracker(state.max_steps, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = int(state.epoch) if int(state.epoch) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
_lowercase : str = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
"""simple docstring"""
if not has_length(lowerCamelCase):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowercase : Optional[int] = self.training_tracker.add_child(len(lowerCamelCase))
else:
_lowercase : Optional[int] = NotebookProgressBar(len(lowerCamelCase))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowercase : Any = None
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowercase : Dict = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
_lowercase : List[Any] = state.global_step
self.training_tracker.write_line(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[str]:
"""simple docstring"""
if self.training_tracker is not None:
_lowercase : Tuple = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history):
if "loss" in log:
_lowercase : int = log['loss']
break
if self.first_column == "Epoch":
_lowercase : Union[str, Any] = int(state.epoch)
else:
_lowercase : Optional[Any] = state.global_step
_lowercase : str = 'eval'
for k in metrics:
if k.endswith('_loss'):
_lowercase : str = re.sub(R'\_loss$', '', lowerCamelCase)
_lowercase : Tuple = metrics.pop('total_flos', lowerCamelCase)
_lowercase : List[str] = metrics.pop('epoch', lowerCamelCase)
_lowercase : List[Any] = metrics.pop(F'''{metric_key_prefix}_runtime''', lowerCamelCase)
_lowercase : Dict = metrics.pop(F'''{metric_key_prefix}_samples_per_second''', lowerCamelCase)
_lowercase : Tuple = metrics.pop(F'''{metric_key_prefix}_steps_per_second''', lowerCamelCase)
_lowercase : List[str] = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', lowerCamelCase)
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
_lowercase : Union[str, Any] = v
else:
_lowercase : Optional[Any] = k.split('_')
_lowercase : Optional[int] = ' '.join([part.capitalize() for part in splits[1:]])
_lowercase : Tuple = v
self.training_tracker.write_line(lowerCamelCase)
self.training_tracker.remove_child()
_lowercase : str = None
# Evaluation takes a long time so we should force the next update.
_lowercase : Optional[Any] = True
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
self.training_tracker.update(
state.global_step, comment=F'''Epoch {int(state.epoch)}/{state.num_train_epochs}''', force_update=lowerCamelCase)
_lowercase : Any = None
| 89 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase_( ) -> List[Any]:
_lowercase : int = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
_lowercase : Optional[Any] = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
_lowercase : Any = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
_lowercase : Optional[int] = args.func(lowerCamelCase_ )
service.run()
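    # e.g. running `transformers-cli env` selects the subparser registered by
    # EnvironmentCommand; args.func then builds the command object whose run() executes it.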
if __name__ == "__main__":
main()
| 89 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
_lowercase : Tuple = True if 'large' in model_name or 'huge' in model_name else False
_lowercase : Any = True if 'large' in model_name or 'huge' in model_name else False
_lowercase : Dict = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_lowercase : Any = [3, 3, 3, 3]
_lowercase : Any = [5, 5, 5, 5]
elif "fl4" in model_name:
_lowercase : Dict = [4, 4, 4, 4]
_lowercase : Tuple = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_lowercase : str = [3, 3, 3, 3]
if "lrf" in model_name:
_lowercase : Optional[int] = [3, 3, 3, 3]
else:
_lowercase : Dict = [2, 2, 2, 2]
if "tiny" in model_name:
_lowercase : List[str] = 96
elif "small" in model_name:
_lowercase : Dict = 96
elif "base" in model_name:
_lowercase : Optional[int] = 128
elif "large" in model_name:
_lowercase : List[Any] = 192
elif "xlarge" in model_name:
_lowercase : Optional[Any] = 256
elif "huge" in model_name:
_lowercase : Dict = 352
# set label information
_lowercase : int = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
_lowercase : str = 'imagenet-22k-id2label.json'
else:
_lowercase : Tuple = 'imagenet-1k-id2label.json'
_lowercase : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : Any = {v: k for k, v in idalabel.items()}
_lowercase : Optional[Any] = FocalNetConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , focal_levels=lowerCamelCase_ , focal_windows=lowerCamelCase_ , use_conv_embed=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , use_post_layernorm=lowerCamelCase_ , use_layerscale=lowerCamelCase_ , )
return config
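# For example, get_focalnet_config("focalnet-tiny") yields depths [2, 2, 6, 2],
# embed_dim 96, focal_levels [2, 2, 2, 2] with focal_windows [3, 3, 3, 3], and
# the ImageNet-1k label mapping.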
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
if "patch_embed.proj" in name:
_lowercase : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : str = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
_lowercase : Any = 'encoder.' + name
if "encoder.layers" in name:
_lowercase : int = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
_lowercase : Tuple = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
_lowercase : str = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_lowercase : List[str] = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_lowercase : int = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_lowercase : Any = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
_lowercase : Any = 'layernorm.weight'
if name == "norm.bias":
_lowercase : Tuple = 'layernorm.bias'
if "head" in name:
_lowercase : Optional[int] = name.replace('head' , 'classifier' )
else:
_lowercase : Optional[int] = 'focalnet.' + name
return name
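# Worked example: "layers.0.blocks.1.modulation.f.weight" is rewritten step by step to
# "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"
# ("layers" -> "encoder.stages", "blocks" -> "layers", "modulation.f" -> "modulation.projection_in",
# plus the "focalnet." prefix for non-classifier weights).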
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> str:
# fmt: off
_lowercase : Dict = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
_lowercase : Dict = model_name_to_url[model_name]
print('Checkpoint URL: ' , lowerCamelCase_ )
_lowercase : List[str] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[int] = val
_lowercase : Union[str, Any] = get_focalnet_config(lowerCamelCase_ )
_lowercase : Optional[Any] = FocalNetForImageClassification(lowerCamelCase_ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase_ )
# verify conversion
_lowercase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : Any = BitImageProcessor(
do_resize=lowerCamelCase_ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase_ , crop_size=224 , do_normalize=lowerCamelCase_ , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ , )
_lowercase : List[str] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
_lowercase : List[Any] = processor(images=lowerCamelCase_ , return_tensors='pt' )
_lowercase : str = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
_lowercase : List[str] = image_transforms(lowerCamelCase_ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase_ , atol=1e-4 )
_lowercase : Dict = model(**lowerCamelCase_ )
_lowercase : int = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_lowercase : Optional[Any] = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
_lowercase : int = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
_lowercase : str = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
_lowercase : Any = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
_lowercase : List[Any] = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
_lowercase : int = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 1 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
SCREAMING_SNAKE_CASE : Dict = trt.Logger(trt.Logger.WARNING)
SCREAMING_SNAKE_CASE : str = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
if args.tokenizer_name:
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
SCREAMING_SNAKE_CASE : Tuple = args.per_device_eval_batch_size
SCREAMING_SNAKE_CASE : str = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = "temp_engine/bert-fp32.engine"
if args.fp16:
SCREAMING_SNAKE_CASE : Optional[Any] = "temp_engine/bert-fp16.engine"
if args.int8:
SCREAMING_SNAKE_CASE : Any = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
SCREAMING_SNAKE_CASE : Optional[int] = [network.get_input(i) for i in range(network.num_inputs)]
SCREAMING_SNAKE_CASE : int = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
SCREAMING_SNAKE_CASE : str = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
SCREAMING_SNAKE_CASE : Union[str, Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
SCREAMING_SNAKE_CASE : int = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
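# A minimal sketch (not part of this script) of how the serialized engine would
# typically be reloaded with the TensorRT runtime API:
# with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
#     engine = runtime.deserialize_cuda_engine(f.read())
#     context = engine.create_execution_context()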
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_lowercase : List[Any] = np.asarray(inputs['input_ids'] , dtype=np.intaa )
_lowercase : Union[str, Any] = np.asarray(inputs['attention_mask'] , dtype=np.intaa )
_lowercase : Any = np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase_ )
# start time
_lowercase : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase_ ) for d_inp in d_inputs] + [int(lowerCamelCase_ ), int(lowerCamelCase_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
cuda.memcpy_dtoh_async(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
_lowercase : Any = time.time()
_lowercase : Dict = end_time - start_time
_lowercase : Union[str, Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
SCREAMING_SNAKE_CASE : Dict = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : str = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_datasets["validation"].column_names
SCREAMING_SNAKE_CASE : Optional[Any] = "question" if "question" in column_names else column_names[0]
SCREAMING_SNAKE_CASE : str = "context" if "context" in column_names else column_names[1]
SCREAMING_SNAKE_CASE : List[Any] = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
SCREAMING_SNAKE_CASE : Any = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
SCREAMING_SNAKE_CASE : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def UpperCamelCase_( lowerCamelCase_ ) -> str:
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
_lowercase : Tuple = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
_lowercase : Optional[Any] = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] ,
        examples[context_column_name if pad_on_right else question_column_name] ,
        truncation='only_second' if pad_on_right else 'only_first' ,
        max_length=lowerCamelCase_ ,
        stride=args.doc_stride ,
        return_overflowing_tokens=lowerCamelCase_ ,
        return_offsets_mapping=lowerCamelCase_ ,
        padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
_lowercase : str = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
_lowercase : int = []
for i in range(len(tokenized_examples['input_ids'] ) ):
        # Grab the sequence corresponding to that example (to know what the context is and what the question is).
_lowercase : Any = tokenized_examples.sequence_ids(lowerCamelCase_ )
_lowercase : str = 1 if pad_on_right else 0
        # One example can give several spans; this is the index of the example containing this span of text.
_lowercase : Optional[Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
        # Set the offset_mapping entries that are not part of the context to None, so it is easy to tell
        # whether a token position is part of the context or not.
_lowercase : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
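To make the offset-mapping masking step above concrete, here is a minimal, tokenizer-free sketch; the token layout and offsets are made up for illustration:
# Toy feature: sequence_ids mark question tokens (0), context tokens (1), special tokens (None)
sequence_ids = [None, 0, 0, None, 1, 1, 1, None]
offset_mapping = [(0, 0), (0, 4), (5, 9), (0, 0), (0, 3), (4, 8), (9, 12), (0, 0)]
context_index = 1  # the pad_on_right case
masked = [o if sequence_ids[k] == context_index else None for k, o in enumerate(offset_mapping)]
print(masked)  # only the context offsets survive; question/special positions become None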
SCREAMING_SNAKE_CASE : str = raw_datasets["validation"]
# Validation Feature Creation
SCREAMING_SNAKE_CASE : List[Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
SCREAMING_SNAKE_CASE : Tuple = default_data_collator
SCREAMING_SNAKE_CASE : List[str] = eval_dataset.remove_columns(["example_id", "offset_mapping"])
SCREAMING_SNAKE_CASE : List[str] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="eval" ) -> Union[str, Any]:
# Post-processing: we match the start logits and end logits to answers in the original context.
_lowercase : Dict = postprocess_qa_predictions(
        examples=lowerCamelCase_ ,
        features=lowerCamelCase_ ,
        predictions=lowerCamelCase_ ,
        version_2_with_negative=args.version_2_with_negative ,
        n_best_size=args.n_best_size ,
        max_answer_length=args.max_answer_length ,
        null_score_diff_threshold=args.null_score_diff_threshold ,
        output_dir=args.output_dir ,
        prefix=lowerCamelCase_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
_lowercase : Tuple = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
_lowercase : int = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
_lowercase : List[str] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase_ , label_ids=lowerCamelCase_ )
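For reference, the squad metrics consume predictions and references shaped like the following (toy values; the answers dict is the standard SQuAD layout):
predictions = [{'id': 'q1', 'prediction_text': 'Denver Broncos'}]
references = [{'id': 'q1', 'answers': {'text': ['Denver Broncos'], 'answer_start': [177]}}]
# metric.compute(predictions=predictions, references=references) -> {'exact_match': 100.0, 'f1': 100.0}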
SCREAMING_SNAKE_CASE : Tuple = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
return trt.volume(engine.get_binding_shape(lowerCamelCase_ ) ) * engine.get_binding_dtype(lowerCamelCase_ ).itemsize
# Allocate device memory for inputs and outputs.
SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
SCREAMING_SNAKE_CASE : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
SCREAMING_SNAKE_CASE : Optional[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
SCREAMING_SNAKE_CASE : List[str] = cuda.mem_alloc(h_outputa.nbytes)
SCREAMING_SNAKE_CASE : int = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
SCREAMING_SNAKE_CASE : Tuple = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
SCREAMING_SNAKE_CASE : Dict = 0.0
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : str = timeit.default_timer()
SCREAMING_SNAKE_CASE : Optional[Any] = None
for step, batch in enumerate(eval_dataloader):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = outputs
SCREAMING_SNAKE_CASE : str = torch.tensor(start_logits)
SCREAMING_SNAKE_CASE : Dict = torch.tensor(end_logits)
        # pad predictions and labels so they can be gathered across processes
SCREAMING_SNAKE_CASE : List[str] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
SCREAMING_SNAKE_CASE : Any = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
SCREAMING_SNAKE_CASE : Optional[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
SCREAMING_SNAKE_CASE : Any = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
SCREAMING_SNAKE_CASE : List[Any] = nested_truncate(all_preds, len(eval_dataset))
SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
logger.info("Total Number of Inference = %d", niter)
SCREAMING_SNAKE_CASE : str = post_processing_function(eval_examples, eval_dataset, all_preds)
SCREAMING_SNAKE_CASE : Any = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"Evaluation metrics: {eval_metric}")
| 89 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
lowercase_ : Any = """deta"""
lowercase_ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self, lowerCamelCase=None, lowerCamelCase=9_00, lowerCamelCase=20_48, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=True, lowerCamelCase=3_00, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, **lowerCamelCase, ) -> Any:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
_lowercase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
else:
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Dict = backbone_config.pop('model_type')
_lowercase : int = CONFIG_MAPPING[backbone_model_type]
_lowercase : Union[str, Any] = config_class.from_dict(lowerCamelCase)
_lowercase : Union[str, Any] = backbone_config
_lowercase : Any = num_queries
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Union[str, Any] = d_model
_lowercase : Optional[int] = encoder_ffn_dim
_lowercase : Optional[int] = encoder_layers
_lowercase : Optional[Any] = encoder_attention_heads
_lowercase : Optional[Any] = decoder_ffn_dim
_lowercase : Dict = decoder_layers
_lowercase : Tuple = decoder_attention_heads
_lowercase : Union[str, Any] = dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : int = activation_dropout
_lowercase : Tuple = activation_function
_lowercase : List[Any] = init_std
_lowercase : Union[str, Any] = init_xavier_std
_lowercase : int = encoder_layerdrop
_lowercase : Optional[int] = auxiliary_loss
_lowercase : Dict = position_embedding_type
# deformable attributes
_lowercase : Any = num_feature_levels
_lowercase : str = encoder_n_points
_lowercase : Any = decoder_n_points
_lowercase : List[str] = two_stage
_lowercase : Dict = two_stage_num_proposals
_lowercase : Any = with_box_refine
_lowercase : List[Any] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
_lowercase : List[Any] = class_cost
_lowercase : Optional[int] = bbox_cost
_lowercase : str = giou_cost
# Loss coefficients
_lowercase : Optional[int] = mask_loss_coefficient
_lowercase : int = dice_loss_coefficient
_lowercase : List[Any] = bbox_loss_coefficient
_lowercase : Optional[Any] = giou_loss_coefficient
_lowercase : str = eos_coefficient
_lowercase : int = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.d_model
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = copy.deepcopy(self.__dict__)
_lowercase : Optional[int] = self.backbone_config.to_dict()
_lowercase : Optional[Any] = self.__class__.model_type
return output
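As a usage sketch, a config class like this is normally instantiated directly and serialized with to_dict. The snippet below assumes a transformers release that ships DetaConfig (the upstream name for this class; the import and version are assumptions here):
from transformers import DetaConfig  # assumption: a transformers version with DETA support
config = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
print(config.num_attention_heads)   # alias for encoder_attention_heads via attribute_map
serialized = config.to_dict()       # the nested backbone_config is expanded to a plain dict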
| 89 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : Optional[Any] = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
SCREAMING_SNAKE_CASE : Optional[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Any = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
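The _LazyModule above defers heavy imports until an attribute is first touched. Here is a minimal standalone sketch of that pattern (not the transformers implementation, just the idea):
import importlib
import types
class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        submodule = self._attr_to_submodule[attr]          # unknown names raise KeyError here
        value = getattr(importlib.import_module(submodule), attr)
        setattr(self, attr, value)                         # cache so the import runs only once
        return value
lazy_json = TinyLazyModule('lazy_json', {'json': ['dumps', 'loads']})
print(lazy_json.dumps({'ok': True}))                       # 'json' is imported only at this point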
| 89 |
from __future__ import annotations
import numpy as np
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
return np.maximum(0 , lowerCamelCase_ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
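A natural companion under the same numpy conventions (relying on the numpy import above): ReLU's subgradient is the indicator of positive inputs, with 0 chosen at x == 0.
def relu_derivative(vector):
    # 1.0 where the input was positive, 0.0 elsewhere
    return (np.asarray(vector) > 0).astype(float)
print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]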
| 89 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : List[Any] = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = ["PoolFormerFeatureExtractor"]
SCREAMING_SNAKE_CASE : List[Any] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 89 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
# Initialise PyTorch model
_lowercase : Optional[int] = TaConfig.from_json_file(lowerCamelCase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowercase : Union[str, Any] = TaForConditionalGeneration(lowerCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 89 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class _lowerCamelCase( _a ):
lowercase_ : int = """ctrl"""
lowercase_ : List[Any] = ["""past_key_values"""]
lowercase_ : Any = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self, lowerCamelCase=24_65_34, lowerCamelCase=2_56, lowerCamelCase=12_80, lowerCamelCase=81_92, lowerCamelCase=48, lowerCamelCase=16, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=1E-6, lowerCamelCase=0.0_2, lowerCamelCase=True, **lowerCamelCase, ) -> Dict:
"""simple docstring"""
_lowercase : Optional[int] = vocab_size
_lowercase : Optional[Any] = n_positions
_lowercase : int = n_embd
_lowercase : Union[str, Any] = n_layer
_lowercase : Tuple = n_head
_lowercase : Any = dff
_lowercase : Tuple = resid_pdrop
_lowercase : List[str] = embd_pdrop
_lowercase : Dict = layer_norm_epsilon
_lowercase : Optional[int] = initializer_range
_lowercase : Tuple = use_cache
super().__init__(**lowerCamelCase)
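The attribute_map idiom above lets canonical names such as hidden_size resolve to model-specific fields such as n_embd. A minimal standalone sketch of the mechanism:
class TinyConfig:
    attribute_map = {'hidden_size': 'n_embd', 'num_attention_heads': 'n_head'}
    def __init__(self, n_embd=1280, n_head=16):
        self.n_embd = n_embd
        self.n_head = n_head
    def __getattr__(self, name):
        # only reached when normal lookup fails, i.e. for the canonical aliases
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)
print(TinyConfig().hidden_size)  # 1280, served from n_embd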
| 89 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if n == 1 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return 0
elif n == 2:
return 1
else:
_lowercase : List[str] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Tuple = 0
_lowercase : List[str] = 2
while digits < n:
index += 1
_lowercase : Optional[int] = len(str(fibonacci(lowerCamelCase_ ) ) )
return index
def UpperCamelCase_( lowerCamelCase_ = 1000 ) -> int:
return fibonacci_digits_index(lowerCamelCase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
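Because the fibonacci helper above rebuilds the whole sequence for every candidate index, the search is quadratic. An equivalent single-pass version that keeps only the last two terms gives the same answer:
def fibonacci_digits_index_fast(n: int) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index
print(fibonacci_digits_index_fast(3))  # 12, since F(12) = 144 is the first 3-digit term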
| 89 | 1 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
# Initialise PyTorch model
_lowercase : Optional[int] = TaConfig.from_json_file(lowerCamelCase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowercase : Union[str, Any] = TaForConditionalGeneration(lowerCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : Optional[Any] = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
SCREAMING_SNAKE_CASE : Optional[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Any = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 1 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = torch.nn.Linear(10, 10)
_lowercase : Optional[int] = torch.optim.SGD(model.parameters(), 0.1)
_lowercase : Optional[Any] = Accelerator()
_lowercase : Any = accelerator.prepare(lowerCamelCase)
try:
pickle.loads(pickle.dumps(lowerCamelCase))
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''')
AcceleratorState._reset_state()
| 89 |
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar), the speed of light c, and the
# value of π used by the function below
SCREAMING_SNAKE_CASE : Union[str, Any] = 1.0_5457_1817E-34 # unit of ℏ : J * s
SCREAMING_SNAKE_CASE : int = 3E8 # unit of c : m * s^-1
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
if force == 0:
_lowercase : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowercase : List[Any] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowercase : List[Any] = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
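A worked call of the force branch above, in SI units throughout; the plate size and separation are illustrative values:
from math import pi
hbar, c = 1.054571817e-34, 3e8
area, distance = 4e-4, 1e-6              # two 4 cm^2 plates, 1 micrometre apart
force = (hbar * c * pi**2 * area) / (240 * distance**4)
print(force)                              # ~5.2e-7 N of attraction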
| 89 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : List[Any] = FlaxAutoencoderKL
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Any = 4
_lowercase : str = 3
_lowercase : str = (32, 32)
_lowercase : int = jax.random.PRNGKey(0)
_lowercase : Tuple = jax.random.uniform(lowerCamelCase, ((batch_size, num_channels) + sizes))
return {"sample": image, "prng_key": prng_key}
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[Any] = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
_lowercase : Any = self.dummy_input
return init_dict, inputs_dict
| 89 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('multiplicative_persistence() only accepts integral values' )
if num < 0:
raise ValueError('multiplicative_persistence() does not accept negative values' )
_lowercase : List[str] = 0
_lowercase : Optional[int] = str(lowerCamelCase_ )
while len(lowerCamelCase_ ) != 1:
_lowercase : Any = [int(lowerCamelCase_ ) for i in num_string]
_lowercase : List[Any] = 1
for i in range(0 , len(lowerCamelCase_ ) ):
total *= numbers[i]
_lowercase : Optional[Any] = str(lowerCamelCase_ )
steps += 1
return steps
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('additive_persistence() only accepts integral values' )
if num < 0:
raise ValueError('additive_persistence() does not accept negative values' )
_lowercase : Optional[int] = 0
_lowercase : str = str(lowerCamelCase_ )
while len(lowerCamelCase_ ) != 1:
_lowercase : Dict = [int(lowerCamelCase_ ) for i in num_string]
_lowercase : Any = 0
for i in range(0 , len(lowerCamelCase_ ) ):
total += numbers[i]
_lowercase : Dict = str(lowerCamelCase_ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
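A hand trace of the multiplicative case, mirroring the loop above: 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4, i.e. three steps.
n, steps = 39, 0
while n >= 10:
    product = 1
    for digit in str(n):
        product *= int(digit)
    n, steps = product, steps + 1
print(steps)  # 3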
| 89 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _lowerCamelCase( unittest.TestCase ):
def __init__( self, lowerCamelCase, lowerCamelCase=7, lowerCamelCase=3, lowerCamelCase=18, lowerCamelCase=30, lowerCamelCase=4_00, lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=[0.5, 0.5, 0.5], lowerCamelCase=[0.5, 0.5, 0.5], ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = parent
_lowercase : Union[str, Any] = batch_size
_lowercase : List[Any] = num_channels
_lowercase : str = image_size
_lowercase : str = min_resolution
_lowercase : str = max_resolution
_lowercase : int = do_resize
_lowercase : Dict = size if size is not None else {'height': 18, 'width': 20}
_lowercase : Optional[int] = do_thumbnail
_lowercase : Optional[Any] = do_align_axis
_lowercase : Optional[Any] = do_pad
_lowercase : str = do_normalize
_lowercase : Any = image_mean
_lowercase : Tuple = image_std
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : List[str] = DonutImageProcessor if is_vision_available() else None
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = DonutImageProcessingTester(self)
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Dict = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCamelCase, 'do_resize'))
self.assertTrue(hasattr(lowerCamelCase, 'size'))
self.assertTrue(hasattr(lowerCamelCase, 'do_thumbnail'))
self.assertTrue(hasattr(lowerCamelCase, 'do_align_long_axis'))
self.assertTrue(hasattr(lowerCamelCase, 'do_pad'))
self.assertTrue(hasattr(lowerCamelCase, 'do_normalize'))
self.assertTrue(hasattr(lowerCamelCase, 'image_mean'))
self.assertTrue(hasattr(lowerCamelCase, 'image_std'))
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {'height': 18, 'width': 20})
_lowercase : str = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
# Previous config had dimensions in (width, height) order
_lowercase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
self.assertEqual(image_processor.size, {'height': 84, 'width': 42})
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
pass
@is_flaky()
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Any = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowercase : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image)
# Test not batched input
_lowercase : List[str] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_lowercase : Optional[int] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
@is_flaky()
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray)
# Test not batched input
_lowercase : str = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_lowercase : Optional[Any] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
@is_flaky()
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowercase : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor)
# Test not batched input
_lowercase : List[Any] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_lowercase : Optional[int] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
| 89 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
# initialize config
if "resnet-50" in model_name:
_lowercase : Union[str, Any] = ResNetConfig.from_pretrained('microsoft/resnet-50' )
elif "resnet-101" in model_name:
_lowercase : Optional[Any] = ResNetConfig.from_pretrained('microsoft/resnet-101' )
else:
raise ValueError('Model name should include either resnet50 or resnet101' )
_lowercase : Tuple = DetrConfig(use_timm_backbone=lowerCamelCase_ , backbone_config=lowerCamelCase_ )
# set label attributes
_lowercase : Any = 'panoptic' in model_name
if is_panoptic:
_lowercase : List[Any] = 250
else:
_lowercase : str = 91
_lowercase : List[Any] = 'huggingface/label-files'
_lowercase : Any = 'coco-detection-id2label.json'
_lowercase : Tuple = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : int = idalabel
_lowercase : Any = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowercase : List[str] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : str = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[Any] = val
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=False ) -> str:
_lowercase : Any = ''
if is_panoptic:
_lowercase : Optional[Any] = 'detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowercase : int = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase : Tuple = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : List[str] = in_proj_weight[:256, :]
_lowercase : Tuple = in_proj_bias[:256]
_lowercase : List[Any] = in_proj_weight[256:512, :]
_lowercase : Any = in_proj_bias[256:512]
_lowercase : int = in_proj_weight[-256:, :]
_lowercase : Optional[int] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowercase : str = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase : Optional[int] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : Union[str, Any] = in_proj_weight[:256, :]
_lowercase : Dict = in_proj_bias[:256]
_lowercase : Tuple = in_proj_weight[256:512, :]
_lowercase : Dict = in_proj_bias[256:512]
_lowercase : str = in_proj_weight[-256:, :]
_lowercase : Optional[int] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowercase : Tuple = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
_lowercase : Dict = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowercase : List[str] = in_proj_weight_cross_attn[:256, :]
_lowercase : Tuple = in_proj_bias_cross_attn[:256]
_lowercase : str = in_proj_weight_cross_attn[256:512, :]
_lowercase : Union[str, Any] = in_proj_bias_cross_attn[256:512]
_lowercase : List[Any] = in_proj_weight_cross_attn[-256:, :]
_lowercase : Dict = in_proj_bias_cross_attn[-256:]
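The slicing above relies on PyTorch's fused in_proj layout, where the query, key and value matrices are stacked row-wise. A numpy sketch of the same split for a 256-dim model:
import numpy as np
hidden = 256
in_proj_weight = np.zeros((3 * hidden, hidden), dtype=np.float32)  # stacked [q; k; v]
q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)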
def UpperCamelCase_( ) -> List[Any]:
_lowercase : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : str = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False ) -> List[Any]:
_lowercase , _lowercase : int = get_detr_config(lowerCamelCase_ )
# load original model from torch hub
_lowercase : int = {
'detr-resnet-50': 'detr_resnet50',
'detr-resnet-101': 'detr_resnet101',
}
logger.info(F'''Converting model {model_name}...''' )
_lowercase : Optional[Any] = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=lowerCamelCase_ ).eval()
_lowercase : str = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(lowerCamelCase_ ):
if is_panoptic:
_lowercase : str = 'detr.' + src
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase_ , is_panoptic=lowerCamelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowercase : List[Any] = 'detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
_lowercase : Tuple = state_dict.pop(lowerCamelCase_ )
_lowercase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
_lowercase : Optional[Any] = state_dict.pop(lowerCamelCase_ )
_lowercase : Union[str, Any] = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : List[str] = val
# finally, create HuggingFace model and load state dict
_lowercase : Optional[Any] = DetrForSegmentation(lowerCamelCase_ ) if is_panoptic else DetrForObjectDetection(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
# verify our conversion on an image
_lowercase : str = 'coco_panoptic' if is_panoptic else 'coco_detection'
_lowercase : Optional[int] = DetrImageProcessor(format=lowerCamelCase_ )
_lowercase : str = processor(images=prepare_img() , return_tensors='pt' )
_lowercase : Tuple = encoding['pixel_values']
_lowercase : int = detr(lowerCamelCase_ )
_lowercase : Tuple = model(lowerCamelCase_ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _lowerCamelCase( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
def __init__( self, lowerCamelCase=None, **lowerCamelCase) -> Tuple:
"""simple docstring"""
super().__init__(features=lowerCamelCase)
_lowercase : List[str] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def UpperCamelCase ( self, lowerCamelCase) -> str:
"""simple docstring"""
import torch
if isinstance(lowerCamelCase, lowerCamelCase) and column:
if all(
isinstance(lowerCamelCase, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column):
return torch.stack(lowerCamelCase)
return column
def UpperCamelCase ( self, lowerCamelCase) -> Any:
"""simple docstring"""
import torch
if isinstance(lowerCamelCase, (str, bytes, type(lowerCamelCase))):
return value
elif isinstance(lowerCamelCase, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
_lowercase : Optional[int] = {}
if isinstance(lowerCamelCase, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
_lowercase : List[str] = {'dtype': torch.intaa}
elif isinstance(lowerCamelCase, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
_lowercase : Any = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowerCamelCase, PIL.Image.Image):
_lowercase : List[Any] = np.asarray(lowerCamelCase)
return torch.tensor(lowerCamelCase, **{**default_dtype, **self.torch_tensor_kwargs})
def UpperCamelCase ( self, lowerCamelCase) -> Any:
"""simple docstring"""
import torch
# support for torch, tf, jax etc.
if hasattr(lowerCamelCase, '__array__') and not isinstance(lowerCamelCase, torch.Tensor):
_lowercase : Any = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowerCamelCase, np.ndarray):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(lowerCamelCase) for substruct in data_struct])
elif isinstance(lowerCamelCase, (list, tuple)):
return self._consolidate([self.recursive_tensorize(lowerCamelCase) for substruct in data_struct])
return self._tensorize(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
return map_nested(self._recursive_tensorize, lowerCamelCase, map_list=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Mapping:
"""simple docstring"""
_lowercase : Optional[Any] = self.numpy_arrow_extractor().extract_row(lowerCamelCase)
_lowercase : Optional[Any] = self.python_features_decoder.decode_row(lowerCamelCase)
return self.recursive_tensorize(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> "torch.Tensor":
"""simple docstring"""
_lowercase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(lowerCamelCase)
_lowercase : int = self.python_features_decoder.decode_column(lowerCamelCase, pa_table.column_names[0])
_lowercase : Union[str, Any] = self.recursive_tensorize(lowerCamelCase)
_lowercase : Optional[Any] = self._consolidate(lowerCamelCase)
return column
def UpperCamelCase ( self, lowerCamelCase) -> Mapping:
"""simple docstring"""
_lowercase : str = self.numpy_arrow_extractor().extract_batch(lowerCamelCase)
_lowercase : List[Any] = self.python_features_decoder.decode_batch(lowerCamelCase)
_lowercase : Any = self.recursive_tensorize(lowerCamelCase)
for column_name in batch:
_lowercase : Tuple = self._consolidate(batch[column_name])
return batch
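The dtype branches above normalize numpy values before handing them to torch.tensor. In the upstream datasets code the targets are torch.int64 and torch.float32; reading the obfuscated torch.intaa / torch.floataa names that way is an inference on my part. A minimal sketch of the integer promotion:
import numpy as np
import torch
arr = np.array([1, 2, 3], dtype=np.int32)
t = torch.tensor(arr, dtype=torch.int64)   # integer inputs are widened to int64
print(t.dtype)                             # torch.int64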
| 89 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE : str = "scheduler_config.json"
class _lowerCamelCase( _a ):
lowercase_ : Any = 1
lowercase_ : Dict = 2
lowercase_ : Union[str, Any] = 3
lowercase_ : Tuple = 4
lowercase_ : Optional[Any] = 5
@dataclass
class _lowerCamelCase( _a ):
lowercase_ : jnp.ndarray
class _lowerCamelCase:
lowercase_ : Union[str, Any] = SCHEDULER_CONFIG_NAME
lowercase_ : str = ["""dtype"""]
lowercase_ : Dict = []
lowercase_ : int = True
@classmethod
def UpperCamelCase ( cls, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
_lowercase , _lowercase : Optional[int] = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase, subfolder=lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase, )
_lowercase , _lowercase : Tuple = cls.from_config(lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase)
if hasattr(lowerCamelCase, 'create_state') and getattr(lowerCamelCase, 'has_state', lowerCamelCase):
_lowercase : List[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, **lowerCamelCase) -> Any:
"""simple docstring"""
self.save_config(save_directory=lowerCamelCase, push_to_hub=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def UpperCamelCase ( cls) -> Any:
"""simple docstring"""
_lowercase : Any = list(set([cls.__name__] + cls._compatibles))
_lowercase : Dict = importlib.import_module(__name__.split('.')[0])
_lowercase : Any = [
getattr(lowerCamelCase, lowerCamelCase) for c in compatible_classes_str if hasattr(lowerCamelCase, lowerCamelCase)
]
return compatible_classes
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> jnp.ndarray:
assert len(lowerCamelCase_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCamelCase_ ) - x.ndim) ) , lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=0.9_99 , lowerCamelCase_=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(lowerCamelCase_ ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
_lowercase : List[Any] = []
for i in range(lowerCamelCase_ ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
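To see what betas_for_alpha_bar produces, here is the same squared-cosine construction evaluated for ten steps with plain math:
import math
def alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
T = 10
betas = [min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), 0.999) for i in range(T)]
print([round(b, 4) for b in betas])  # small early betas, clipped to 0.999 at the final step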
@flax.struct.dataclass
class _lowerCamelCase:
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
@classmethod
def UpperCamelCase ( cls, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : int = scheduler.config
if config.trained_betas is not None:
_lowercase : str = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
elif config.beta_schedule == "linear":
_lowercase : List[Any] = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Dict = (
jnp.linspace(
config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Optional[int] = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''')
_lowercase : List[str] = 1.0 - betas
_lowercase : Union[str, Any] = jnp.cumprod(lowerCamelCase, axis=0)
return cls(
alphas=lowerCamelCase, betas=lowerCamelCase, alphas_cumprod=lowerCamelCase, )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : str = state.alphas_cumprod
_lowercase : str = alphas_cumprod[timesteps] ** 0.5
_lowercase : Optional[Any] = sqrt_alpha_prod.flatten()
_lowercase : Tuple = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
_lowercase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowercase : Optional[Any] = sqrt_one_minus_alpha_prod.flatten()
_lowercase : int = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase , _lowercase : Optional[int] = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_lowercase , _lowercase : Tuple = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
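# Sketch of how the three helpers above are typically used (names illustrative;
# `state` would be the frozen dataclass built from a scheduler config):
#   noisy    = sqrt(alphas_cumprod[t]) * x0 + sqrt(1 - alphas_cumprod[t]) * eps
#   velocity = sqrt(alphas_cumprod[t]) * eps - sqrt(1 - alphas_cumprod[t]) * x0
# i.e. the forward-diffusion q(x_t | x_0) sample and the v-prediction target.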
| 89 | 1 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
# Initialise PyTorch model
_lowercase : Optional[Any] = MobileBertConfig.from_json_file(lowerCamelCase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowercase : Optional[Any] = MobileBertForPreTraining(lowerCamelCase_ )
# Load weights from tf checkpoint
_lowercase : List[Any] = load_tf_weights_in_mobilebert(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
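# Typical invocation (script name and paths are placeholders):
#   python convert_mobilebert_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#       --mobilebert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin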
| 89 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> float:
if not nums:
raise ValueError('List is empty' )
return sum(lowerCamelCase_ ) / len(lowerCamelCase_ )
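# Quick sanity check (illustrative): the arithmetic mean of [3, 6, 9] is 6.0.
assert UpperCamelCase_([3, 6, 9] ) == 6.0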
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _lowerCamelCase( enum.Enum ):
lowercase_ : int = 0
lowercase_ : str = 1
lowercase_ : Optional[Any] = 2
@add_end_docstrings(_a )
class _lowerCamelCase( _a ):
lowercase_ : List[str] = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
super().__init__(*lowerCamelCase, **lowerCamelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_lowercase : int = None
if self.model.config.prefix is not None:
_lowercase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_lowercase : str = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_lowercase , _lowercase , _lowercase : Optional[Any] = self._sanitize_parameters(prefix=lowerCamelCase, **self._forward_params)
_lowercase : Dict = {**self._preprocess_params, **preprocess_params}
_lowercase : str = {**self._forward_params, **forward_params}
def UpperCamelCase ( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase, ) -> int:
"""simple docstring"""
_lowercase : List[str] = {}
if prefix is not None:
_lowercase : str = prefix
if prefix:
_lowercase : Dict = self.tokenizer(
lowerCamelCase, padding=lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=self.framework)
_lowercase : Any = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
                raise ValueError(
                    F'''{handle_long_generation} is not a valid value for the `handle_long_generation` parameter,'''
                    ' expected one of [None, \'hole\']')
_lowercase : str = handle_long_generation
preprocess_params.update(lowerCamelCase)
_lowercase : Optional[int] = generate_kwargs
_lowercase : Union[str, Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`')
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`')
_lowercase : Union[str, Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`')
_lowercase : List[str] = ReturnType.TENSORS
if return_type is not None:
_lowercase : Tuple = return_type
if clean_up_tokenization_spaces is not None:
_lowercase : Optional[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_lowercase : List[Any] = self.tokenizer.encode(lowerCamelCase, add_special_tokens=lowerCamelCase)
if len(lowerCamelCase) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.')
_lowercase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True})
return super()._parse_and_tokenize(*lowerCamelCase, **lowerCamelCase)
def __call__( self, lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
return super().__call__(lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase="", lowerCamelCase=None, **lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : List[str] = self.tokenizer(
prefix + prompt_text, padding=lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=self.framework)
_lowercase : Optional[int] = prompt_text
if handle_long_generation == "hole":
_lowercase : Dict = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_lowercase : Any = generate_kwargs['max_new_tokens']
else:
_lowercase : Tuple = generate_kwargs.get('max_length', self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected')
if cur_len + new_tokens > self.tokenizer.model_max_length:
_lowercase : Optional[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        ' model\'s max length')
_lowercase : Tuple = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_lowercase : Optional[int] = inputs['attention_mask'][:, -keep_length:]
return inputs
def UpperCamelCase ( self, lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Any = model_inputs['input_ids']
_lowercase : str = model_inputs.get('attention_mask', lowerCamelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
_lowercase : List[str] = None
_lowercase : int = None
_lowercase : str = 1
else:
_lowercase : Dict = input_ids.shape[0]
_lowercase : int = model_inputs.pop('prompt_text')
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_lowercase : Optional[int] = generate_kwargs.pop('prefix_length', 0)
if prefix_length > 0:
_lowercase : int = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_lowercase : Union[str, Any] = generate_kwargs.get('max_length') or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_lowercase : str = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_lowercase : Dict = self.model.generate(input_ids=lowerCamelCase, attention_mask=lowerCamelCase, **lowerCamelCase)
_lowercase : int = generated_sequence.shape[0]
if self.framework == "pt":
_lowercase : Optional[Any] = generated_sequence.reshape(lowerCamelCase, out_b // in_b, *generated_sequence.shape[1:])
elif self.framework == "tf":
_lowercase : Optional[int] = tf.reshape(lowerCamelCase, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=ReturnType.FULL_TEXT, lowerCamelCase=True) -> List[Any]:
"""simple docstring"""
_lowercase : Tuple = model_outputs['generated_sequence'][0]
_lowercase : str = model_outputs['input_ids']
_lowercase : Any = model_outputs['prompt_text']
_lowercase : str = generated_sequence.numpy().tolist()
_lowercase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_lowercase : Dict = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_lowercase : Union[str, Any] = self.tokenizer.decode(
lowerCamelCase, skip_special_tokens=lowerCamelCase, clean_up_tokenization_spaces=lowerCamelCase, )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_lowercase : Union[str, Any] = 0
else:
_lowercase : Dict = len(
self.tokenizer.decode(
input_ids[0], skip_special_tokens=lowerCamelCase, clean_up_tokenization_spaces=lowerCamelCase, ))
if return_type == ReturnType.FULL_TEXT:
_lowercase : int = prompt_text + text[prompt_length:]
else:
_lowercase : List[str] = text[prompt_length:]
_lowercase : Dict = {'generated_text': all_text}
records.append(lowerCamelCase)
return records
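# Illustrative usage through the standard `pipeline` factory (checkpoint name is
# a placeholder; `pipeline` itself is not imported in this module):
#   generator = pipeline('text-generation', model='gpt2')
#   generator('Hello, I\'m a language model,', max_new_tokens=20,
#             handle_long_generation='hole', return_full_text=False)
# With handle_long_generation='hole', the prompt is trimmed from the left when
# prompt length + requested new tokens would exceed the model's max length.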
| 89 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase_( ) -> List[Any]:
_lowercase : int = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
_lowercase : Optional[Any] = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
_lowercase : Any = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
_lowercase : Optional[int] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
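# Once installed, the registrations above surface as subcommands, e.g.:
#   transformers-cli env
#   transformers-cli convert --help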
| 89 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = "▁"
SCREAMING_SNAKE_CASE : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"}
SCREAMING_SNAKE_CASE : str = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
SCREAMING_SNAKE_CASE : int = {
"facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[str] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class _lowerCamelCase( _a ):
lowercase_ : List[Any] = VOCAB_FILES_NAMES
lowercase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Tuple = ["""input_ids""", """attention_mask"""]
lowercase_ : List[int] = []
lowercase_ : List[int] = []
def __init__( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase="</s>", lowerCamelCase="</s>", lowerCamelCase="<s>", lowerCamelCase="<unk>", lowerCamelCase="<pad>", lowerCamelCase="<mask>", lowerCamelCase = None, **lowerCamelCase, ) -> None:
"""simple docstring"""
_lowercase : Optional[Any] = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else mask_token
_lowercase : int = {} if sp_model_kwargs is None else sp_model_kwargs
_lowercase : Any = kwargs.get('additional_special_tokens', [])
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCamelCase, tgt_lang=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
_lowercase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowerCamelCase))
_lowercase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_lowercase : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowercase : Union[str, Any] = 1
_lowercase : Union[str, Any] = len(self.sp_model)
_lowercase : Any = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCamelCase)
}
_lowercase : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()}
_lowercase : Any = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
_lowercase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowercase : Any = src_lang if src_lang is not None else 'en_XX'
_lowercase : Union[str, Any] = self.lang_code_to_id[self._src_lang]
_lowercase : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self, lowerCamelCase) -> None:
"""simple docstring"""
_lowercase : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self) -> Dict:
"""simple docstring"""
_lowercase : Tuple = self.__dict__.copy()
_lowercase : Optional[int] = None
return state
def __setstate__( self, lowerCamelCase) -> None:
"""simple docstring"""
_lowercase : Optional[Any] = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs'):
_lowercase : str = {}
_lowercase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Dict = {self.convert_ids_to_tokens(lowerCamelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowercase : int = self.sp_model.PieceToId(lowerCamelCase)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase ( self, lowerCamelCase) -> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def UpperCamelCase ( self, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = []
_lowercase : str = ''
_lowercase : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase) + token
_lowercase : str = True
_lowercase : Optional[Any] = []
else:
current_sub_tokens.append(lowerCamelCase)
_lowercase : List[Any] = False
out_string += self.sp_model.decode(lowerCamelCase)
return out_string.strip()
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_lowercase : Dict = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, lowerCamelCase)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase, 'wb') as fi:
_lowercase : Tuple = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase)
return (out_vocab_file,)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase)
_lowercase : List[str] = [1] * len(self.prefix_tokens)
_lowercase : Dict = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase)) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase)) + ([0] * len(lowerCamelCase)) + suffix_ones
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
_lowercase : str = src_lang
_lowercase : List[str] = self(lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase)
_lowercase : Dict = self.convert_tokens_to_ids(lowerCamelCase)
_lowercase : int = tgt_lang_id
return inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = "en_XX", lowerCamelCase = None, lowerCamelCase = "ro_RO", **lowerCamelCase, ) -> BatchEncoding:
"""simple docstring"""
_lowercase : Dict = src_lang
_lowercase : str = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase, lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def UpperCamelCase ( self, lowerCamelCase) -> None:
"""simple docstring"""
_lowercase : Any = self.lang_code_to_id[src_lang]
_lowercase : List[str] = [self.cur_lang_code_id]
_lowercase : int = [self.eos_token_id]
def UpperCamelCase ( self, lowerCamelCase) -> None:
"""simple docstring"""
_lowercase : str = self.lang_code_to_id[tgt_lang]
_lowercase : str = [self.cur_lang_code_id]
_lowercase : Union[str, Any] = [self.eos_token_id]
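# Illustrative round-trip, assuming the standard MBart-50 API (checkpoint name
# as listed in PRETRAINED_VOCAB_FILES_MAP above). With src_lang='en_XX', encoded
# inputs are laid out as [en_XX] <tokens> </s>, per set_src_lang_special_tokens:
#   tok = MBart50Tokenizer.from_pretrained(
#       'facebook/mbart-large-50-one-to-many-mmt', src_lang='en_XX', tgt_lang='ro_RO')
#   batch = tok('UN Chief says there is no military solution in Syria',
#               return_tensors='pt')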
| 89 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Optional[int] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase ( self, lowerCamelCase=0) -> str:
"""simple docstring"""
_lowercase : Optional[int] = np.random.RandomState(lowerCamelCase)
_lowercase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : Tuple = pipe(**lowerCamelCase).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Any = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_dummy_inputs()
_lowercase : Any = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
_lowercase : int = self.get_dummy_inputs()
_lowercase : Union[str, Any] = 3 * [inputs.pop('prompt')]
_lowercase : Union[str, Any] = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Tuple = text_inputs['input_ids']
_lowercase : Any = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
_lowercase : List[Any] = prompt_embeds
# forward
_lowercase : Union[str, Any] = pipe(**lowerCamelCase)
_lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = self.get_dummy_inputs()
_lowercase : Any = 3 * ['this is a negative prompt']
_lowercase : str = negative_prompt
_lowercase : Optional[int] = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : str = output.images[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : str = 3 * [inputs.pop('prompt')]
_lowercase : Optional[int] = []
for p in [prompt, negative_prompt]:
_lowercase : Tuple = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Dict = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
_lowercase , _lowercase : str = embeds
# forward
_lowercase : Dict = pipe(**lowerCamelCase)
_lowercase : Tuple = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : int = ort.SessionOptions()
_lowercase : str = False
return options
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0)
_lowercase : Union[str, Any] = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : str = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'open neural network exchange'
_lowercase : List[Any] = np.random.RandomState(0)
_lowercase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = 'open neural network exchange'
_lowercase : str = np.random.RandomState(0)
_lowercase : Dict = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : List[Any] = 0
def test_callback_fn(lowerCamelCase, lowerCamelCase, lowerCamelCase) -> None:
_lowercase : List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_lowercase : Any = latents[0, -3:, -3:, -1]
_lowercase : Tuple = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_lowercase : List[Any] = latents[0, -3:, -3:, -1]
_lowercase : str = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
_lowercase : Any = False
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = 'Andromeda galaxy in a bottle'
_lowercase : str = np.random.RandomState(0)
pipe(
prompt=lowerCamelCase, num_inference_steps=5, guidance_scale=7.5, generator=lowerCamelCase, callback=lowerCamelCase, callback_steps=1, )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
assert isinstance(lowerCamelCase, lowerCamelCase)
assert pipe.safety_checker is None
_lowercase : Optional[int] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase)
_lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowercase : List[str] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
| 89 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = "▁"
SCREAMING_SNAKE_CASE : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
SCREAMING_SNAKE_CASE : List[Any] = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
SCREAMING_SNAKE_CASE : Tuple = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class _lowerCamelCase( _a ):
lowercase_ : Optional[int] = VOCAB_FILES_NAMES
lowercase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : int = ["""input_ids""", """attention_mask"""]
lowercase_ : List[int] = []
lowercase_ : List[int] = []
def __init__( self, lowerCamelCase, lowerCamelCase="<s>", lowerCamelCase="</s>", lowerCamelCase="</s>", lowerCamelCase="<s>", lowerCamelCase="<unk>", lowerCamelCase="<pad>", lowerCamelCase="<mask>", lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase = None, lowerCamelCase=None, **lowerCamelCase, ) -> int:
"""simple docstring"""
_lowercase : Dict = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else mask_token
_lowercase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, tokenizer_file=lowerCamelCase, src_lang=lowerCamelCase, tgt_lang=lowerCamelCase, additional_special_tokens=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
_lowercase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowerCamelCase))
_lowercase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_lowercase : List[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowercase : Dict = 1
_lowercase : List[Any] = len(self.sp_model)
_lowercase : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCamelCase)
}
_lowercase : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
_lowercase : str = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
_lowercase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowercase : List[Any] = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
_lowercase : Union[str, Any] = src_lang if src_lang is not None else 'en_XX'
_lowercase : int = self.lang_code_to_id[self._src_lang]
_lowercase : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self) -> str:
"""simple docstring"""
_lowercase : List[str] = self.__dict__.copy()
_lowercase : Tuple = None
_lowercase : Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, lowerCamelCase) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[int] = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs'):
_lowercase : Tuple = {}
_lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self, lowerCamelCase) -> None:
"""simple docstring"""
_lowercase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase)
_lowercase : List[Any] = [1] * len(self.prefix_tokens)
_lowercase : Optional[Any] = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase)) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase)) + ([0] * len(lowerCamelCase)) + suffix_ones
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = [self.sep_token_id]
_lowercase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
_lowercase : List[Any] = src_lang
_lowercase : List[str] = self(lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase)
_lowercase : int = self.convert_tokens_to_ids(lowerCamelCase)
_lowercase : Any = tgt_lang_id
return inputs
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = {self.convert_ids_to_tokens(lowerCamelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> str:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowercase : Optional[Any] = self.sp_model.PieceToId(lowerCamelCase)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase ( self, lowerCamelCase) -> List[Any]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def UpperCamelCase ( self, lowerCamelCase) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = ''.join(lowerCamelCase).replace(lowerCamelCase, ' ').strip()
return out_string
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_lowercase : int = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, lowerCamelCase)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase, 'wb') as fi:
_lowercase : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase)
return (out_vocab_file,)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = "en_XX", lowerCamelCase = None, lowerCamelCase = "ro_RO", **lowerCamelCase, ) -> BatchEncoding:
"""simple docstring"""
_lowercase : Union[str, Any] = src_lang
_lowercase : str = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase, lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def UpperCamelCase ( self, lowerCamelCase) -> None:
"""simple docstring"""
_lowercase : List[str] = self.lang_code_to_id[src_lang]
_lowercase : str = []
_lowercase : List[Any] = [self.eos_token_id, self.cur_lang_code]
def UpperCamelCase ( self, lowerCamelCase) -> None:
"""simple docstring"""
_lowercase : Optional[Any] = self.lang_code_to_id[lang]
_lowercase : Union[str, Any] = []
_lowercase : List[str] = [self.eos_token_id, self.cur_lang_code]
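# Note: unlike the MBart-50 variant earlier in this file, this tokenizer leaves
# prefix_tokens empty and appends [</s>, lang_code] as suffix_tokens for both
# source and target, so sequences end with the language code rather than
# starting with it.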
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : List[Any] = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = ["PoolFormerFeatureExtractor"]
SCREAMING_SNAKE_CASE : List[Any] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
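# The lazy pattern above means the heavy torch/vision imports only run when an
# attribute is first accessed, e.g. (illustrative):
#   from transformers.models.poolformer import PoolFormerModel  # resolved lazily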
| 89 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCamelCase( unittest.TestCase ):
def __init__( self, lowerCamelCase, lowerCamelCase=7, lowerCamelCase=3, lowerCamelCase=18, lowerCamelCase=30, lowerCamelCase=4_00, lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=True, ) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = size if size is not None else {'height': 18, 'width': 18}
_lowercase : Tuple = parent
_lowercase : List[str] = batch_size
_lowercase : Union[str, Any] = num_channels
_lowercase : List[Any] = image_size
_lowercase : List[Any] = min_resolution
_lowercase : Optional[Any] = max_resolution
_lowercase : Any = do_resize
_lowercase : int = size
_lowercase : Union[str, Any] = apply_ocr
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : List[str] = LayoutLMvaImageProcessingTester(self)
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCamelCase, 'do_resize'))
self.assertTrue(hasattr(lowerCamelCase, 'size'))
self.assertTrue(hasattr(lowerCamelCase, 'apply_ocr'))
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : str = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
_lowercase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : str = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowercase : List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image)
# Test not batched input
_lowercase : str = image_processing(image_inputs[0], return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
self.assertIsInstance(encoding.words, lowerCamelCase)
self.assertIsInstance(encoding.boxes, lowerCamelCase)
# Test batched
_lowercase : List[Any] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray)
# Test not batched input
_lowercase : int = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_lowercase : Optional[int] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowercase : Optional[int] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor)
# Test not batched input
_lowercase : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_lowercase : Tuple = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : int = LayoutLMvaImageProcessor()
from datasets import load_dataset
_lowercase : Union[str, Any] = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
_lowercase : Union[str, Any] = Image.open(ds[0]['file']).convert('RGB')
_lowercase : Optional[Any] = image_processing(lowerCamelCase, return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24))
self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_lowercase : Union[str, Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_lowercase : str = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, lowerCamelCase)
self.assertListEqual(encoding.boxes, lowerCamelCase)
# with apply_OCR = False
_lowercase : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase)
_lowercase : Tuple = image_processing(lowerCamelCase, return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24))
| 89 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
SCREAMING_SNAKE_CASE : int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
SCREAMING_SNAKE_CASE : Dict = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print("\n".join(upper_files) + "\n")
SCREAMING_SNAKE_CASE : List[Any] = [file for file in filepaths if " " in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print("\n".join(space_files) + "\n")
SCREAMING_SNAKE_CASE : Any = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print("\n".join(hyphen_files) + "\n")
SCREAMING_SNAKE_CASE : str = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print("\n".join(nodir_files) + "\n")
SCREAMING_SNAKE_CASE : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
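# Illustrative self-check (added sketch; the sample paths below are
# hypothetical and not produced by good_file_paths()): each filter above
# flags exactly one class of problem path.
_sample_paths = ["maths/Prime_check.py", "data structures/heap.py", "graphs/a-star.py", "README.md"]
assert [f for f in _sample_paths if f != f.lower()] == ["maths/Prime_check.py"]
assert [f for f in _sample_paths if " " in f] == ["data structures/heap.py"]
assert [f for f in _sample_paths if "-" in f] == ["graphs/a-star.py"]
# and, with POSIX os.sep == "/": [f for f in _sample_paths if os.sep not in f] == ["README.md"]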
| 89 | 1 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_point1, end_point2) -> Vector3d:
    # component-wise difference end_point2 - end_point1
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_3d_vectors_cross(ab, ac) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector, accuracy) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(a, b, c, accuracy=10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
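# Minimal usage sketch (points are illustrative, not from the original file):
# B and C lie on the line through A with direction (1, 1, 1), so AB x AC is
# the zero vector and the three points are collinear.
assert are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0))
assert not are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 3.0))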
| 89 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Any:
_lowercase : str = 10
_lowercase : List[str] = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_lowercase : Union[str, Any] = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(lowerCamelCase_ ) ),
} , features=lowerCamelCase_ , )
return dataset
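# Sanity sketch (comments only; not part of the original fixture): the dataset
# built above has 10 rows, and each "tokens" entry is five "foo" strings, i.e.
#   len(dataset) == 10
#   dataset[0]["tokens"] == ["foo", "foo", "foo", "foo", "foo"]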
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : int = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE : str = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'file.txt'
_lowercase : List[str] = FILE_CONTENT
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
import bza
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with bza.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_lowercase : Optional[int] = bytes(lowerCamelCase_ , 'utf-8' )
with gzip.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with lza.frame.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowerCamelCase_ , 'w' ) as archive:
archive.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
import tarfile
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
import lzma
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_lowercase : int = bytes(lowerCamelCase_ , 'utf-8' )
with lzma.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
import zipfile
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_lowercase : Dict = bytes(lowerCamelCase_ , 'utf-8' )
with zstd.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
_lowercase : Optional[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
SCREAMING_SNAKE_CASE : Optional[Any] = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
SCREAMING_SNAKE_CASE : Tuple = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
SCREAMING_SNAKE_CASE : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[int] = datasets.Dataset.from_dict(lowerCamelCase_ )
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowerCamelCase_ ) ) as con:
_lowercase : Union[str, Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
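# Hedged roundtrip sketch (standard sqlite3 API; helper added for illustration,
# never called by the fixtures): reading the database back should yield DATA's
# rows in insertion order, e.g. a first row of ("0", 0, 0.0).
def _read_sqlite_rows(path):
    import sqlite3
    with contextlib.closing(sqlite3.connect(path)) as con:
        return con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()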
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : Tuple = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : str = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
import bza
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCamelCase_ , 'rb' ) as f:
_lowercase : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
_lowercase : Optional[Any] = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowerCamelCase_ , 'wb' ) as f:
_lowercase : List[str] = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_ )
_lowercase : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase_ ) )] for k in DATA[0]} , schema=lowerCamelCase_ )
writer.write_table(lowerCamelCase_ )
writer.close()
return path
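# Hedged verification sketch (standard pyarrow API; helper added for
# illustration, never called by the fixtures): the file written above should
# round-trip to the same four rows as DATA.
def _read_parquet_columns(path):
    table = pq.read_table(path)
    return table.to_pydict()  # e.g. {"col_1": ["0", "1", "2", "3"], ...}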
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : List[Any] = {'data': DATA}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : Optional[Any] = {'data': DATA_DICT_OF_LISTS}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
import gzip
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Optional[int] = ['0', '1', '2', '3']
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : str = ['0', '1', '2', '3']
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : List[Any] = ['0', '1', '2', '3']
_lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
_lowercase : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase_ )
return path
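# Note (added for context): "\u2029" is U+2029 PARAGRAPH SEPARATOR. Python's
# str.splitlines() treats it as a line break while plain "\n"-based splitting
# does not -- exactly the edge case this fixture exercises:
#   len("a\u2029b".splitlines()) == 2, but "a\u2029b".split("\n") == ["a\u2029b"]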
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> int:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
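# Illustrative helper (pathlib only; added sketch, never called by the
# fixtures): filtering out hidden entries leaves just subdir/train.txt and
# subdir/test.txt, the layout a loader that skips dotfiles would see.
def _visible_txt_files(data_dir):
    return sorted(
        p
        for p in data_dir.rglob("*.txt")
        if not any(part.startswith(".") for part in p.relative_to(data_dir).parts)
    )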
| 89 | 1 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Optional[int] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase ( self, lowerCamelCase=0) -> str:
"""simple docstring"""
_lowercase : Optional[int] = np.random.RandomState(lowerCamelCase)
_lowercase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : Tuple = pipe(**lowerCamelCase).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Any = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_dummy_inputs()
_lowercase : Any = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
_lowercase : int = self.get_dummy_inputs()
_lowercase : Union[str, Any] = 3 * [inputs.pop('prompt')]
_lowercase : Union[str, Any] = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Tuple = text_inputs['input_ids']
_lowercase : Any = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
_lowercase : List[Any] = prompt_embeds
# forward
_lowercase : Union[str, Any] = pipe(**lowerCamelCase)
_lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = self.get_dummy_inputs()
_lowercase : Any = 3 * ['this is a negative prompt']
_lowercase : str = negative_prompt
_lowercase : Optional[int] = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : str = output.images[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : str = 3 * [inputs.pop('prompt')]
_lowercase : Optional[int] = []
for p in [prompt, negative_prompt]:
_lowercase : Tuple = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Dict = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
_lowercase , _lowercase : str = embeds
# forward
_lowercase : Dict = pipe(**lowerCamelCase)
_lowercase : Tuple = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : int = ort.SessionOptions()
_lowercase : str = False
return options
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0)
_lowercase : Union[str, Any] = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : str = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'open neural network exchange'
_lowercase : List[Any] = np.random.RandomState(0)
_lowercase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = 'open neural network exchange'
_lowercase : str = np.random.RandomState(0)
_lowercase : Dict = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : List[Any] = 0
def test_callback_fn(lowerCamelCase, lowerCamelCase, lowerCamelCase) -> None:
_lowercase : List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_lowercase : Any = latents[0, -3:, -3:, -1]
_lowercase : Tuple = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_lowercase : List[Any] = latents[0, -3:, -3:, -1]
_lowercase : str = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
_lowercase : Any = False
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = 'Andromeda galaxy in a bottle'
_lowercase : str = np.random.RandomState(0)
pipe(
prompt=lowerCamelCase, num_inference_steps=5, guidance_scale=7.5, generator=lowerCamelCase, callback=lowerCamelCase, callback_steps=1, )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
assert isinstance(lowerCamelCase, lowerCamelCase)
assert pipe.safety_checker is None
_lowercase : Optional[int] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase)
_lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowercase : List[str] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
| 89 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE : str = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
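# Usage note (added for clarity; illustrates the lazy-import pattern above):
# outside of type checking, attribute access on this module goes through
# _LazyModule.__getattr__, so e.g.
#   from transformers.models.gpt_bigcode import GPTBigCodeConfig
# only imports the configuration submodule, and the torch-heavy modeling code
# is loaded on first use of a modeling class such as GPTBigCodeModel.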
| 89 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 89 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=2, lowerCamelCase=99, lowerCamelCase=0, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase="last", lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=0, ) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : List[str] = seq_length
_lowercase : int = is_training
_lowercase : List[str] = use_input_lengths
_lowercase : int = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : Union[str, Any] = gelu_activation
_lowercase : List[str] = sinusoidal_embeddings
_lowercase : str = causal
_lowercase : Optional[int] = asm
_lowercase : Union[str, Any] = n_langs
_lowercase : List[Any] = vocab_size
_lowercase : Any = n_special
_lowercase : Any = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : List[str] = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : int = num_labels
_lowercase : Optional[int] = num_choices
_lowercase : Optional[Any] = summary_type
_lowercase : Optional[Any] = use_proj
_lowercase : int = scope
_lowercase : List[Any] = bos_token_id
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowercase : int = None
if self.use_input_lengths:
_lowercase : Dict = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
_lowercase : Tuple = None
_lowercase : int = None
_lowercase : int = None
if self.use_labels:
_lowercase : str = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase : Dict = ids_tensor([self.batch_size], 2).float()
_lowercase : Tuple = ids_tensor([self.batch_size], self.num_choices)
_lowercase : Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = XLMModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : str = model(lowerCamelCase, lengths=lowerCamelCase, langs=lowerCamelCase)
_lowercase : int = model(lowerCamelCase, langs=lowerCamelCase)
_lowercase : Any = model(lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[Any]:
"""simple docstring"""
_lowercase : Dict = XLMWithLMHeadModel(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> str:
"""simple docstring"""
_lowercase : Tuple = XLMForQuestionAnsweringSimple(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(lowerCamelCase)
_lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
_lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = XLMForQuestionAnswering(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase)
_lowercase : List[Any] = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, p_mask=lowerCamelCase, )
_lowercase : List[str] = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, )
((_lowercase) , ) : Optional[Any] = result_with_labels.to_tuple()
_lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
((_lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, ())
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = XLMForSequenceClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
_lowercase : Optional[int] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.num_labels
_lowercase : str = XLMForTokenClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Dict:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_choices
_lowercase : Optional[int] = XLMForMultipleChoice(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : int = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Optional[Any] = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : List[str] = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Dict = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Optional[Any] = config_and_inputs
_lowercase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, _a, unittest.TestCase ):
lowercase_ : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ : Optional[int] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase_ : Union[str, Any] = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=False) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_lowercase : Any = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase)
_lowercase : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase)
return inputs_dict
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Union[str, Any] = XLMModelTester(self)
_lowercase : List[str] = ConfigTester(self, config_class=lowerCamelCase, emb_dim=37)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> int:
"""simple docstring"""
self.assertIsInstance(lowerCamelCase, lowerCamelCase)
self.assertListEqual(
[isinstance(lowerCamelCase, lowerCamelCase) for iter_attentions in attentions], [True] * len(lowerCamelCase))
self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(lowerCamelCase):
# adds PAD dummy token
_lowercase : Dict = min_length + idx + 1
_lowercase : int = min_length + idx + 1
_lowercase : Dict = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(lowerCamelCase))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> List[Any]:
"""simple docstring"""
self.assertIsInstance(lowerCamelCase, lowerCamelCase)
self.assertListEqual(
[isinstance(lowerCamelCase, lowerCamelCase) for iter_hidden_states in hidden_states], [True] * len(lowerCamelCase), )
self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(lowerCamelCase):
# adds PAD dummy token
_lowercase : int = min_length + idx + 1
_lowercase : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(lowerCamelCase), )
pass
@slow
def UpperCamelCase ( self) -> int:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Dict = XLMModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor([[14, 4_47]], dtype=torch.long, device=lowerCamelCase) # the president
_lowercase : Any = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_lowercase : str = model.generate(lowerCamelCase, do_sample=lowerCamelCase)
self.assertListEqual(output_ids[0].cpu().numpy().tolist(), lowerCamelCase)
| 89 | 1 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
SCREAMING_SNAKE_CASE : Optional[int] = 8
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=BITS ) -> List[Any]:
_lowercase : List[str] = x.device
_lowercase : Union[str, Any] = (x * 255).int().clamp(0 , 255 )
_lowercase : List[str] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowerCamelCase_ )
_lowercase : Union[str, Any] = rearrange(lowerCamelCase_ , 'd -> d 1 1' )
_lowercase : Union[str, Any] = rearrange(lowerCamelCase_ , 'b c h w -> b c 1 h w' )
_lowercase : Union[str, Any] = ((x & mask) != 0).float()
_lowercase : Dict = rearrange(lowerCamelCase_ , 'b c d h w -> b (c d) h w' )
_lowercase : Tuple = bits * 2 - 1
return bits
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=BITS ) -> Dict:
_lowercase : List[Any] = x.device
_lowercase : List[Any] = (x > 0).int()
_lowercase : Tuple = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowerCamelCase_ , dtype=torch.intaa )
_lowercase : List[str] = rearrange(lowerCamelCase_ , 'd -> d 1 1' )
_lowercase : Tuple = rearrange(lowerCamelCase_ , 'b (c d) h w -> b c d h w' , d=8 )
_lowercase : Optional[int] = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
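# Worked sketch (added; plain torch ops mirroring the two helpers above in
# their de-obfuscated form): the 8-bit value 200 = 0b11001000 becomes the sign
# pattern [+ + - - + - - -] and decodes back exactly.
_v = torch.tensor(200)
_mask = 2 ** torch.arange(7, -1, -1)  # [128, 64, ..., 1], MSB first
_bits = ((_v & _mask) != 0).float() * 2 - 1  # values in {-1.0, +1.0}
assert int(((_bits > 0).int() * _mask).sum()) == 200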
def UpperCamelCase_( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 0.0 , lowerCamelCase_ = True , lowerCamelCase_=None , lowerCamelCase_ = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read the DDIM paper in detail to understand the notation below.
# Notation (<variable name> -> <name in paper>):
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_lowercase : Optional[int] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_lowercase : int = self.alphas_cumprod[timestep]
_lowercase : Optional[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_lowercase : Optional[int] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_lowercase : Any = self.bit_scale
if self.config.clip_sample:
_lowercase : str = torch.clamp(lowerCamelCase_ , -scale , lowerCamelCase_ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_lowercase : Dict = self._get_variance(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Union[str, Any] = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_lowercase : str = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase : Tuple = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase : Optional[Any] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_lowercase : List[str] = model_output.device if torch.is_tensor(lowerCamelCase_ ) else 'cpu'
_lowercase : List[Any] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=lowerCamelCase_ ).to(lowerCamelCase_ )
_lowercase : Union[str, Any] = self._get_variance(lowerCamelCase_ , lowerCamelCase_ ) ** 0.5 * eta * noise
_lowercase : Optional[Any] = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=lowerCamelCase_ , pred_original_sample=lowerCamelCase_ )
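# Worked example of the timestep arithmetic in the DDIM step above (values
# assumed for illustration): with num_train_timesteps = 1000 and
# num_inference_steps = 50, each step jumps 1000 // 50 = 20 training steps,
# so timestep 980 maps to prev_timestep = 960; on the final step
# prev_timestep drops below 0 and `final_alpha_cumprod` is used instead of
# an `alphas_cumprod` lookup.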
def UpperCamelCase_( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="epsilon" , lowerCamelCase_=None , lowerCamelCase_ = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
_lowercase : Union[str, Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_lowercase , _lowercase : Union[str, Any] = torch.split(lowerCamelCase_ , sample.shape[1] , dim=1 )
else:
_lowercase : Optional[int] = None
# 1. compute alphas, betas
_lowercase : Union[str, Any] = self.alphas_cumprod[t]
_lowercase : Dict = self.alphas_cumprod[t - 1] if t > 0 else self.one
_lowercase : Tuple = 1 - alpha_prod_t
_lowercase : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_lowercase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_lowercase : str = model_output
else:
raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
_lowercase : Union[str, Any] = self.bit_scale
if self.config.clip_sample:
_lowercase : List[str] = torch.clamp(lowerCamelCase_ , -scale , lowerCamelCase_ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowercase : List[Any] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_lowercase : Optional[Any] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowercase : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowercase : int = 0
if t > 0:
_lowercase : Union[str, Any] = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowerCamelCase_ ).to(model_output.device )
_lowercase : Tuple = (self._get_variance(lowerCamelCase_ , predicted_variance=lowerCamelCase_ ) ** 0.5) * noise
_lowercase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCamelCase_ , pred_original_sample=lowerCamelCase_ )
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 1.0, ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
_lowercase : str = bit_scale
_lowercase : Dict = (
ddim_bit_scheduler_step if isinstance(lowerCamelCase, lowerCamelCase) else ddpm_bit_scheduler_step
)
self.register_modules(unet=lowerCamelCase, scheduler=lowerCamelCase)
@torch.no_grad()
def __call__( self, lowerCamelCase = 2_56, lowerCamelCase = 2_56, lowerCamelCase = 50, lowerCamelCase = None, lowerCamelCase = 1, lowerCamelCase = "pil", lowerCamelCase = True, **lowerCamelCase, ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
_lowercase : Optional[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, height, width), generator=lowerCamelCase, )
_lowercase : Optional[int] = decimal_to_bits(lowerCamelCase) * self.bit_scale
_lowercase : Tuple = latents.to(self.device)
self.scheduler.set_timesteps(lowerCamelCase)
for t in self.progress_bar(self.scheduler.timesteps):
# predict the noise residual
_lowercase : List[str] = self.unet(lowerCamelCase, lowerCamelCase).sample
# compute the previous noisy sample x_t -> x_t-1
_lowercase : Tuple = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase).prev_sample
_lowercase : Optional[Any] = bits_to_decimal(lowerCamelCase)
if output_type == "pil":
_lowercase : Any = self.numpy_to_pil(lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase)
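# Hypothetical usage sketch for the pipeline above (the public class name is
# obfuscated in this file, so `BitDiffusionPipeline` below is an assumed
# binding, and the UNet constructor arguments are illustrative only):
#
# unet = UNetaDConditionModel(sample_size=2_56, in_channels=3 * 8, out_channels=3 * 8)
# scheduler = DDIMScheduler()
# pipe = BitDiffusionPipeline(unet, scheduler, 1.0)
# images = pipe(2_56, 2_56, 50).images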
| 89 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
@dataclass
class _lowerCamelCase:
lowercase_ : Optional[str] = field(
default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
lowercase_ : Optional[str] = field(
default="""tab_fact""", metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}, )
lowercase_ : int = field(
default=10_24, metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
lowercase_ : bool = field(
default=_a, metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """A csv or a json file containing the training data."""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """A csv or a json file containing the validation data."""} )
lowercase_ : Optional[str] = field(default=_a, metadata={"""help""": """A csv or a json file containing the test data."""} )
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
_lowercase : int = self.train_file.split('.')[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_lowercase : Tuple = self.validation_file.split('.')[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _lowerCamelCase:
lowercase_ : str = field(
default=_a, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
lowercase_ : str = field(
default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
lowercase_ : bool = field(
default=_a, metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
}, )
def UpperCamelCase_( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowercase , _lowercase , _lowercase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowercase , _lowercase , _lowercase : Union[str, Any] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowercase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
datasets.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_lowercase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowercase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowercase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowercase : Optional[Any] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_lowercase : Tuple = data_args.train_file.split('.' )[-1]
_lowercase : int = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_lowercase : Any = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_lowercase : str = load_dataset('csv' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowercase : Optional[int] = load_dataset('json' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowercase : Optional[Any] = raw_datasets['train'].features['label'].names
_lowercase : Any = len(lowerCamelCase_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_lowercase : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCamelCase_ , )
_lowercase : Tuple = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowercase : int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowercase : str = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowercase : List[Any] = {'Refused': 0, 'Entailed': 1}
_lowercase : Union[str, Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_lowercase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCamelCase_ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCamelCase_ ):
_lowercase : int = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_lowercase : Any = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_lowercase : List[Any] = examples['statement']
_lowercase : Optional[Any] = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_lowercase : Union[str, Any] = tokenizer(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ )
_lowercase : Any = examples['label']
return result
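# Shape of the `table_text` payload consumed above, inferred from the '#'
# split logic (example values are illustrative):
#
# table_text = 'col1#col2\na#b\nc#d'
# # -> the first line becomes the DataFrame header, the rest the records:
# # pd.DataFrame.from_records([['a', 'b'], ['c', 'd']], columns=['col1', 'col2'])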
with training_args.main_process_first(desc='dataset map pre-processing' ):
_lowercase : str = raw_datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowercase : Any = raw_datasets['train']
if data_args.max_train_samples is not None:
_lowercase : str = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowercase : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowercase : List[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_lowercase : Optional[int] = raw_datasets['test']
if data_args.max_predict_samples is not None:
_lowercase : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCamelCase_ ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCamelCase_ ):
_lowercase : Dict = p.predictions[0] if isinstance(p.predictions , lowerCamelCase_ ) else p.predictions
_lowercase : Tuple = np.argmax(lowerCamelCase_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowercase : Any = default_data_collator
elif training_args.fpaa:
_lowercase : str = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 )
else:
_lowercase : Optional[Any] = None
# Initialize our Trainer
_lowercase : List[str] = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase_ , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
_lowercase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
_lowercase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowercase : Optional[Any] = last_checkpoint
_lowercase : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
_lowercase : List[Any] = train_result.metrics
_lowercase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
_lowercase : int = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCamelCase_ )
trainer.save_metrics('train' , lowerCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowercase : Tuple = trainer.evaluate(eval_dataset=lowerCamelCase_ )
_lowercase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
_lowercase : Optional[int] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('eval' , lowerCamelCase_ )
trainer.save_metrics('eval' , lowerCamelCase_ )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` column because it contains -1 and Trainer won't like that.
_lowercase : Any = predict_dataset.remove_columns('label' )
_lowercase : Optional[Any] = trainer.predict(lowerCamelCase_ , metric_key_prefix='predict' ).predictions
_lowercase : Union[str, Any] = np.argmax(lowerCamelCase_ , axis=1 )
_lowercase : Dict = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(lowerCamelCase_ ):
_lowercase : List[str] = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
_lowercase : str = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 89 | 1 |
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(lowerCamelCase_ , n - 1 , lowerCamelCase_ ) * a) % mod
else:
_lowercase : str = binary_exponentiation(lowerCamelCase_ , n // 2 , lowerCamelCase_ ) # floor division keeps the exponent an integer
return (b * b) % mod
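# Why this yields a modular inverse below: for a prime mod p with
# gcd(b, p) == 1, Fermat's little theorem gives b ** (p - 1) % p == 1, so
# b ** (p - 2) % p is b's inverse. For example, with p = 701 the inverse of
# 10 is binary_exponentiation(10, 699, 701) == 631, since 10 * 631 % 701 == 1.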
# a prime number
SCREAMING_SNAKE_CASE : str = 701
SCREAMING_SNAKE_CASE : Optional[int] = 1000000000
SCREAMING_SNAKE_CASE : Optional[int] = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
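# same check with the ** operator, which builds the full big integer before taking the mod: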
print((a / b) % p == (a * b ** (p - 2)) % p)
| 89 |
from maths.prime_factors import prime_factors
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : str = F'''Input value of [number={number}] must be an integer'''
raise TypeError(lowerCamelCase_ )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(lowerCamelCase_ ) ) % 2 else 1
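# This is the Liouville function lambda(n) = (-1) ** Omega(n), where Omega
# counts prime factors with multiplicity (prime_factors is assumed to return
# them with multiplicity). E.g. 12 = 2 * 2 * 3 has Omega = 3, giving -1,
# while 4 = 2 * 2 has Omega = 2, giving 1.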
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase="resnet50", lowerCamelCase=3, lowerCamelCase=32, lowerCamelCase=3, lowerCamelCase=True, lowerCamelCase=True, ) -> Tuple:
"""simple docstring"""
_lowercase : Optional[Any] = parent
_lowercase : List[Any] = out_indices if out_indices is not None else [4]
_lowercase : Optional[int] = stage_names
_lowercase : Any = out_features
_lowercase : int = backbone
_lowercase : Optional[int] = batch_size
_lowercase : Dict = image_size
_lowercase : List[Any] = num_channels
_lowercase : Union[str, Any] = use_pretrained_backbone
_lowercase : Optional[int] = is_training
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : List[str] = self.get_config()
return config, pixel_values
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = TimmBackbone(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : List[str] = self.prepare_config_and_inputs()
_lowercase , _lowercase : List[Any] = config_and_inputs
_lowercase : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _lowerCamelCase( _a, _a, _a, unittest.TestCase ):
lowercase_ : Dict = (TimmBackbone,) if is_torch_available() else ()
lowercase_ : Optional[Any] = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
lowercase_ : Union[str, Any] = False
lowercase_ : Optional[Any] = False
lowercase_ : Optional[Any] = False
lowercase_ : Any = False
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = TimmBackboneModelTester(self)
_lowercase : str = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = 'resnet18'
_lowercase : Any = 'microsoft/resnet-18'
_lowercase : List[str] = AutoBackbone.from_pretrained(lowerCamelCase, use_timm_backbone=lowerCamelCase)
_lowercase : int = AutoBackbone.from_pretrained(lowerCamelCase)
self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
self.assertEqual(timm_model.channels, transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices, (-1,))
self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])
_lowercase : List[Any] = AutoBackbone.from_pretrained(lowerCamelCase, use_timm_backbone=lowerCamelCase, out_indices=[1, 2, 3])
_lowercase : int = AutoBackbone.from_pretrained(lowerCamelCase, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def UpperCamelCase ( self) -> int:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def UpperCamelCase ( self) -> int:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def UpperCamelCase ( self) -> str:
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def UpperCamelCase ( self) -> str:
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('Safetensors is not supported by timm.')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase , _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Union[str, Any] = model_class(lowerCamelCase)
_lowercase : Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : List[Any] = [*signature.parameters.keys()]
_lowercase : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : str = True
_lowercase : List[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase : str = self.all_model_classes[0]
_lowercase : Optional[Any] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
_lowercase : Union[str, Any] = self._prepare_for_class(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = model(**lowerCamelCase)
_lowercase : Optional[Any] = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase : str = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase : List[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCamelCase)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Union[str, Any] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(**lowerCamelCase)
self.assertEqual(len(result.feature_maps), len(config.out_indices))
self.assertEqual(len(model.channels), len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase : Dict = copy.deepcopy(lowerCamelCase)
_lowercase : Optional[Any] = None
_lowercase : List[str] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : List[str] = model(**lowerCamelCase)
self.assertEqual(len(result.feature_maps), 1)
self.assertEqual(len(model.channels), 1)
# Check backbone can be initialized with fresh weights
_lowercase : Dict = copy.deepcopy(lowerCamelCase)
_lowercase : List[str] = False
_lowercase : List[Any] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(**lowerCamelCase)
| 89 |
from __future__ import annotations
from typing import Any
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 0) -> None:
"""simple docstring"""
_lowercase , _lowercase : str = row, column
_lowercase : Any = [[default_value for c in range(lowerCamelCase)] for r in range(lowerCamelCase)]
def __str__( self) -> str:
"""simple docstring"""
_lowercase : Tuple = F'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
_lowercase : str = 0
for row_vector in self.array:
for obj in row_vector:
_lowercase : Optional[int] = max(lowerCamelCase, len(str(lowerCamelCase)))
_lowercase : List[str] = F'''%{max_element_length}s'''
# Make string and return
def single_line(lowerCamelCase) -> str:
nonlocal string_format_identifier
_lowercase : Union[str, Any] = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(lowerCamelCase) for row_vector in self.array)
return s
def __repr__( self) -> str:
"""simple docstring"""
return str(self)
def UpperCamelCase ( self, lowerCamelCase) -> bool:
"""simple docstring"""
if not (isinstance(lowerCamelCase, (list, tuple)) and len(lowerCamelCase) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self, lowerCamelCase) -> Any:
"""simple docstring"""
assert self.validate_indicies(lowerCamelCase)
return self.array[loc[0]][loc[1]]
def __setitem__( self, lowerCamelCase, lowerCamelCase) -> None:
"""simple docstring"""
assert self.validate_indicies(lowerCamelCase)
_lowercase : Optional[Any] = value
def __add__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
assert isinstance(lowerCamelCase, lowerCamelCase)
assert self.row == another.row and self.column == another.column
# Add
_lowercase : Any = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : int = self[r, c] + another[r, c]
return result
def __neg__( self) -> Matrix:
"""simple docstring"""
_lowercase : List[Any] = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : List[str] = -self[r, c]
return result
def __sub__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
if isinstance(lowerCamelCase, (int, float)): # Scalar multiplication
_lowercase : Dict = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : Union[str, Any] = self[r, c] * another
return result
elif isinstance(lowerCamelCase, lowerCamelCase): # Matrix multiplication
assert self.column == another.row
_lowercase : str = Matrix(self.row, another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_lowercase : Tuple = F'''Unsupported type given for another ({type(lowerCamelCase)})'''
raise TypeError(lowerCamelCase)
def UpperCamelCase ( self) -> Matrix:
"""simple docstring"""
_lowercase : List[Any] = Matrix(self.column, self.row)
for r in range(self.row):
for c in range(self.column):
_lowercase : Union[str, Any] = self[r, c]
return result
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
assert isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase)
assert self.row == self.column == u.row == v.row # self must be square; u, v must match its dimension
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_lowercase : Dict = v.transpose()
_lowercase : Any = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
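# The method above evaluates the Sherman-Morrison identity with `self`
# playing the role of A^(-1):
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# This costs O(n^2) versus the O(n^3) of re-inverting A + u v^T, and it
# returns None exactly when the denominator 1 + v^T A^(-1) u is zero.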
# Testing
if __name__ == "__main__":
def UpperCamelCase_( ) -> None:
# a^(-1)
_lowercase : Optional[int] = Matrix(3 , 3 , 0 )
for i in range(3 ):
_lowercase : int = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
_lowercase : Dict = Matrix(3 , 1 , 0 )
_lowercase , _lowercase , _lowercase : Dict = 1, 2, -3
_lowercase : List[Any] = Matrix(3 , 1 , 0 )
_lowercase , _lowercase , _lowercase : int = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase_ , lowerCamelCase_ )}''' )
def UpperCamelCase_( ) -> None:
import doctest
doctest.testmod()
testa()
| 89 | 1 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
SCREAMING_SNAKE_CASE : int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
SCREAMING_SNAKE_CASE : Dict = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print("\n".join(upper_files) + "\n")
SCREAMING_SNAKE_CASE : List[Any] = [file for file in filepaths if " " in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print("\n".join(space_files) + "\n")
SCREAMING_SNAKE_CASE : Any = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print("\n".join(hyphen_files) + "\n")
SCREAMING_SNAKE_CASE : str = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print("\n".join(nodir_files) + "\n")
SCREAMING_SNAKE_CASE : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 89 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = int(lowerCamelCase_ )
_lowercase , _lowercase , _lowercase : Optional[Any] = t // 3600, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
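# Example outputs under the intended h/m/s unpacking above (the variable
# names are obfuscated in this file): 3661 seconds renders as '1:01:01' and
# 75 seconds as '01:15'.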
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=300 ) -> Dict:
# docstyle-ignore
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : int = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_lowercase : Any = F'''{elt:.6f}''' if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else str(lowerCamelCase_ )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _lowerCamelCase:
lowercase_ : str = 5
lowercase_ : str = 0.2
def __init__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = 3_00, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = total
_lowercase : Optional[int] = '' if prefix is None else prefix
_lowercase : Tuple = leave
_lowercase : str = parent
_lowercase : str = width
_lowercase : List[Any] = None
_lowercase : List[str] = None
_lowercase : Tuple = None
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, lowerCamelCase = None) -> Dict:
"""simple docstring"""
_lowercase : Any = value
if comment is not None:
_lowercase : Union[str, Any] = comment
if self.last_value is None:
_lowercase : Dict = time.time()
_lowercase : Tuple = value
_lowercase : str = None
_lowercase : Optional[int] = self.warmup
_lowercase : Optional[Any] = 1
self.update_bar(lowerCamelCase)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
if self.first_calls > 0:
self.first_calls -= 1
_lowercase : List[str] = time.time()
_lowercase : Tuple = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_lowercase : Dict = self.elapsed_time / (value - self.start_value)
else:
_lowercase : int = None
if value >= self.total:
_lowercase : Dict = self.total
_lowercase : List[str] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_lowercase : Optional[int] = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCamelCase)
_lowercase : int = value
_lowercase : Tuple = current_time
if self.average_time_per_item is None:
_lowercase : str = 1
else:
_lowercase : int = max(int(self.update_every / self.average_time_per_item), 1)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[Any] = ' ' * (len(str(self.total)) - len(str(lowerCamelCase))) + str(lowerCamelCase)
if self.elapsed_time is None:
_lowercase : int = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
_lowercase : Union[str, Any] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
else:
_lowercase : Union[str, Any] = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
F''' {format_time(self.predicted_remaining)}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]'''
self.display()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_lowercase : Optional[Any] = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(''))
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase=None) -> int:
"""simple docstring"""
super().__init__(lowerCamelCase)
_lowercase : Optional[Any] = None if column_names is None else [column_names]
_lowercase : Any = None
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_lowercase : Dict = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
if self.inner_table is None:
_lowercase : Dict = [list(values.keys()), list(values.values())]
else:
_lowercase : Tuple = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCamelCase)
_lowercase : str = columns
self.inner_table.append([values[c] for c in columns])
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=3_00) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[str] = NotebookProgressBar(lowerCamelCase, prefix=lowerCamelCase, parent=self, width=lowerCamelCase)
return self.child_bar
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[Any] = None
self.display()
class _lowerCamelCase( _a ):
def __init__( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = None
_lowercase : Dict = None
_lowercase : Dict = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Dict = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
_lowercase : Dict = 0
_lowercase : Tuple = 0
_lowercase : int = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss')
_lowercase : Union[str, Any] = NotebookTrainingTracker(state.max_steps, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = int(state.epoch) if int(state.epoch) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
_lowercase : str = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
"""simple docstring"""
if not has_length(lowerCamelCase):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowercase : Optional[int] = self.training_tracker.add_child(len(lowerCamelCase))
else:
_lowercase : Optional[int] = NotebookProgressBar(len(lowerCamelCase))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowercase : Any = None
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowercase : Dict = {'Training Loss': logs['loss']}
# First column is necessarily Step since we're not in epoch eval strategy
_lowercase : List[Any] = state.global_step
self.training_tracker.write_line(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[str]:
"""simple docstring"""
if self.training_tracker is not None:
_lowercase : Tuple = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history):
if "loss" in log:
_lowercase : int = log['loss']
break
if self.first_column == "Epoch":
_lowercase : Union[str, Any] = int(state.epoch)
else:
_lowercase : Optional[Any] = state.global_step
_lowercase : str = 'eval'
for k in metrics:
if k.endswith('_loss'):
_lowercase : str = re.sub(R'\_loss$', '', lowerCamelCase)
_lowercase : Tuple = metrics.pop('total_flos', lowerCamelCase)
_lowercase : List[str] = metrics.pop('epoch', lowerCamelCase)
_lowercase : List[Any] = metrics.pop(F'''{metric_key_prefix}_runtime''', lowerCamelCase)
_lowercase : Dict = metrics.pop(F'''{metric_key_prefix}_samples_per_second''', lowerCamelCase)
_lowercase : Tuple = metrics.pop(F'''{metric_key_prefix}_steps_per_second''', lowerCamelCase)
_lowercase : List[str] = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', lowerCamelCase)
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
_lowercase : Union[str, Any] = v
else:
_lowercase : Optional[Any] = k.split('_')
_lowercase : Optional[int] = ' '.join([part.capitalize() for part in splits[1:]])
_lowercase : Tuple = v
self.training_tracker.write_line(lowerCamelCase)
self.training_tracker.remove_child()
_lowercase : str = None
# Evaluation takes a long time so we should force the next update.
_lowercase : Optional[Any] = True
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
self.training_tracker.update(
state.global_step, comment=F'''Epoch {int(state.epoch)}/{state.num_train_epochs}''', force_update=lowerCamelCase)
_lowercase : Any = None
| 89 | 1 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
_lowercase : int = 0
_lowercase : Optional[int] = number
while duplicate > 0:
_lowercase , _lowercase : Optional[int] = divmod(lowerCamelCase_ , 10 )
fact_sum += factorial(lowerCamelCase_ )
return fact_sum == number
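# Worked example of the digit-factorial check above: 145 -> 1! + 4! + 5! =
# 1 + 24 + 120 = 145, so 145 is a Krishnamurthy number; 1, 2, 145 and 40585
# are the only such numbers in base 10.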
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
SCREAMING_SNAKE_CASE : Any = int(input("Enter number: ").strip())
print(
F"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
)
| 89 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
_lowercase : Tuple = True if 'large' in model_name or 'huge' in model_name else False
_lowercase : Any = True if 'large' in model_name or 'huge' in model_name else False
_lowercase : Dict = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_lowercase : Any = [3, 3, 3, 3]
_lowercase : Any = [5, 5, 5, 5]
elif "fl4" in model_name:
_lowercase : Dict = [4, 4, 4, 4]
_lowercase : Tuple = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_lowercase : str = [3, 3, 3, 3]
if "lrf" in model_name:
_lowercase : Optional[int] = [3, 3, 3, 3]
else:
_lowercase : Dict = [2, 2, 2, 2]
if "tiny" in model_name:
_lowercase : List[str] = 96
elif "small" in model_name:
_lowercase : Dict = 96
elif "base" in model_name:
_lowercase : Optional[int] = 128
elif "large" in model_name:
_lowercase : List[Any] = 192
elif "xlarge" in model_name:
_lowercase : Optional[Any] = 256
elif "huge" in model_name:
_lowercase : Dict = 352
# set label information
_lowercase : int = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
_lowercase : str = 'imagenet-22k-id2label.json'
else:
_lowercase : Tuple = 'imagenet-1k-id2label.json'
_lowercase : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : Any = {v: k for k, v in idalabel.items()}
_lowercase : Optional[Any] = FocalNetConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , focal_levels=lowerCamelCase_ , focal_windows=lowerCamelCase_ , use_conv_embed=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , use_post_layernorm=lowerCamelCase_ , use_layerscale=lowerCamelCase_ , )
return config
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
if "patch_embed.proj" in name:
_lowercase : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : str = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
_lowercase : Any = 'encoder.' + name
if "encoder.layers" in name:
_lowercase : int = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
_lowercase : Tuple = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
_lowercase : str = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_lowercase : List[str] = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_lowercase : int = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_lowercase : Any = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
_lowercase : Any = 'layernorm.weight'
if name == "norm.bias":
_lowercase : Tuple = 'layernorm.bias'
if "head" in name:
_lowercase : Optional[int] = name.replace('head' , 'classifier' )
else:
_lowercase : Optional[int] = 'focalnet.' + name
return name
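# Traced examples of the rename rules above (derived mechanically from the
# branches, no extra assumptions):
# 'layers.0.blocks.1.modulation.f.weight'
# -> 'focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight'
# 'head.weight' -> 'classifier.weight'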
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys using the helper defined above
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.229, 0.224, 0.225],
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 1 |
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row, column, default_value=0) -> None:
        """simple docstring"""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        """simple docstring"""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        """simple docstring"""
        return str(self)

    def validate_indices(self, loc) -> bool:
        """simple docstring"""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc) -> Any:
        """simple docstring"""
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        """simple docstring"""
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another) -> Matrix:
        """simple docstring"""
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        """simple docstring"""
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another) -> Matrix:
        """simple docstring"""
        return self + (-another)

    def __mul__(self, another) -> Matrix:
        """simple docstring"""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        """simple docstring"""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v) -> Any:
        """simple docstring"""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
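# Note added for clarity: if this matrix already holds A^(-1), the
# Sherman-Morrison identity
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# is exactly what sherman_morrison computes: numerator_factor holds the
# scalar 1 + v^T A^(-1) u, and a value of 0 means A + u v^T is singular.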
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 89 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
lowercase_ : Any = """deta"""
lowercase_ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self, lowerCamelCase=None, lowerCamelCase=9_00, lowerCamelCase=20_48, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=True, lowerCamelCase=3_00, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, **lowerCamelCase, ) -> Any:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
_lowercase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
else:
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Dict = backbone_config.pop('model_type')
_lowercase : int = CONFIG_MAPPING[backbone_model_type]
_lowercase : Union[str, Any] = config_class.from_dict(lowerCamelCase)
_lowercase : Union[str, Any] = backbone_config
_lowercase : Any = num_queries
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Union[str, Any] = d_model
_lowercase : Optional[int] = encoder_ffn_dim
_lowercase : Optional[int] = encoder_layers
_lowercase : Optional[Any] = encoder_attention_heads
_lowercase : Optional[Any] = decoder_ffn_dim
_lowercase : Dict = decoder_layers
_lowercase : Tuple = decoder_attention_heads
_lowercase : Union[str, Any] = dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : int = activation_dropout
_lowercase : Tuple = activation_function
_lowercase : List[Any] = init_std
_lowercase : Union[str, Any] = init_xavier_std
_lowercase : int = encoder_layerdrop
_lowercase : Optional[int] = auxiliary_loss
_lowercase : Dict = position_embedding_type
# deformable attributes
_lowercase : Any = num_feature_levels
_lowercase : str = encoder_n_points
_lowercase : Any = decoder_n_points
_lowercase : List[str] = two_stage
_lowercase : Dict = two_stage_num_proposals
_lowercase : Any = with_box_refine
_lowercase : List[Any] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
_lowercase : List[Any] = class_cost
_lowercase : Optional[int] = bbox_cost
_lowercase : str = giou_cost
# Loss coefficients
_lowercase : Optional[int] = mask_loss_coefficient
_lowercase : int = dice_loss_coefficient
_lowercase : List[Any] = bbox_loss_coefficient
_lowercase : Optional[Any] = giou_loss_coefficient
_lowercase : str = eos_coefficient
_lowercase : int = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.d_model
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = copy.deepcopy(self.__dict__)
_lowercase : Optional[int] = self.backbone_config.to_dict()
_lowercase : Optional[Any] = self.__class__.model_type
return output
| 89 | 1 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
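# Usage sketch (added for illustration; `handle_arrow` is hypothetical):
# callers poll get_character() and compare key codes against KEYMAP, e.g.
#
#   ch = get_character()
#   if ch != KEYMAP["undefined"] and ord(ch) == KEYMAP["up"]:
#       handle_arrow("up")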
| 89 |
from __future__ import annotations
import numpy as np
def relu(vector):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 89 | 1 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=99, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=37, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=1_28, lowerCamelCase=32, lowerCamelCase=16, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=3, lowerCamelCase=4, lowerCamelCase=None, ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[str] = parent
_lowercase : Tuple = batch_size
_lowercase : List[str] = seq_length
_lowercase : Optional[int] = is_training
_lowercase : Dict = use_input_mask
_lowercase : Optional[int] = use_token_type_ids
_lowercase : List[str] = use_labels
_lowercase : List[str] = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : List[str] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : Optional[int] = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : Optional[int] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Tuple = scope
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : Dict = None
if self.use_input_mask:
_lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length])
_lowercase : str = None
if self.use_token_type_ids:
_lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowercase : Union[str, Any] = None
_lowercase : List[Any] = None
_lowercase : str = None
if self.use_labels:
_lowercase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase : List[str] = ids_tensor([self.batch_size], self.num_choices)
_lowercase : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
return NezhaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, )
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : Any = NezhaModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : List[Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase)
_lowercase : Union[str, Any] = model(lowerCamelCase, token_type_ids=lowerCamelCase)
_lowercase : Tuple = model(lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Any:
"""simple docstring"""
_lowercase : Dict = True
_lowercase : Union[str, Any] = NezhaModel(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Any = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, encoder_hidden_states=lowerCamelCase, encoder_attention_mask=lowerCamelCase, )
_lowercase : Dict = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, encoder_hidden_states=lowerCamelCase, )
_lowercase : Dict = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = NezhaForMaskedLM(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : str = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Dict = NezhaForNextSentencePrediction(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : str = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = NezhaForPreTraining(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, next_sentence_label=lowerCamelCase, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = NezhaForQuestionAnswering(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Union[str, Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : int = self.num_labels
_lowercase : Tuple = NezhaForSequenceClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_labels
_lowercase : Optional[Any] = NezhaForTokenClassification(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_choices
_lowercase : Any = NezhaForMultipleChoice(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : str = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Tuple = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : str = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Tuple = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, _a, unittest.TestCase ):
lowercase_ : Union[str, Any] = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase_ : str = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : Dict = True
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=False) -> Dict:
"""simple docstring"""
_lowercase : Dict = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase)
if return_labels:
if model_class in get_values(lowerCamelCase):
_lowercase : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase)
_lowercase : Optional[int] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase)
return inputs_dict
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Any = NezhaModelTester(self)
_lowercase : Any = ConfigTester(self, config_class=lowerCamelCase, hidden_size=37)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> int:
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : int = NezhaModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@slow
@require_torch_gpu
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
_lowercase : Tuple = True
_lowercase : List[Any] = model_class(config=lowerCamelCase)
_lowercase : Dict = self._prepare_for_class(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = torch.jit.trace(
lowerCamelCase, (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCamelCase, os.path.join(lowerCamelCase, 'bert.pt'))
_lowercase : int = torch.jit.load(os.path.join(lowerCamelCase, 'bert.pt'), map_location=lowerCamelCase)
loaded(inputs_dict['input_ids'].to(lowerCamelCase), inputs_dict['attention_mask'].to(lowerCamelCase))
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = NezhaModel.from_pretrained('sijunhe/nezha-cn-base')
_lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]])
_lowercase : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1]])
with torch.no_grad():
_lowercase : int = model(lowerCamelCase, attention_mask=lowerCamelCase)[0]
_lowercase : Any = torch.Size((1, 6, 7_68))
self.assertEqual(output.shape, lowerCamelCase)
_lowercase : List[Any] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[Any] = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base')
_lowercase : int = torch.tensor([[0, 1, 2, 3, 4, 5]])
_lowercase : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1]])
with torch.no_grad():
_lowercase : Dict = model(lowerCamelCase, attention_mask=lowerCamelCase)[0]
_lowercase : Optional[Any] = torch.Size((1, 6, 2_11_28))
self.assertEqual(output.shape, lowerCamelCase)
_lowercase : Optional[int] = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], lowerCamelCase, atol=1E-4))
| 89 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
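# Example invocation (illustrative; the script filename and paths are
# assumptions, not part of this file):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5_tf_ckpt \
#       --config_file ./t5_config.json \
#       --pytorch_dump_path ./t5_pytorch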
| 89 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Optional[int] = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowerCamelCase, 'tf_padding'))
self.parent.assertTrue(hasattr(lowerCamelCase, 'depth_multiplier'))
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=3, lowerCamelCase=32, lowerCamelCase=0.2_5, lowerCamelCase=8, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=32, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase="relu6", lowerCamelCase=12_80, lowerCamelCase=0.1, lowerCamelCase=0.0_2, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=10, lowerCamelCase=None, ) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = parent
_lowercase : List[str] = batch_size
_lowercase : str = num_channels
_lowercase : List[str] = image_size
_lowercase : Optional[int] = depth_multiplier
_lowercase : str = depth_divisible_by
_lowercase : List[Any] = min_depth
_lowercase : str = expand_ratio
_lowercase : Any = tf_padding
_lowercase : int = output_stride
_lowercase : Optional[Any] = first_layer_is_expansion
_lowercase : str = finegrained_output
_lowercase : List[str] = hidden_act
_lowercase : List[Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
_lowercase : Tuple = classifier_dropout_prob
_lowercase : Union[str, Any] = use_labels
_lowercase : int = is_training
_lowercase : List[Any] = num_labels
_lowercase : List[Any] = initializer_range
_lowercase : Any = scope
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : str = None
_lowercase : Tuple = None
if self.use_labels:
_lowercase : Any = ids_tensor([self.batch_size], self.num_labels)
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
_lowercase : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : int = MobileNetVaModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
self.parent.assertEqual(
result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Dict = self.num_labels
_lowercase : Dict = MobileNetVaForImageClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : str = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : str = self.num_labels
_lowercase : List[str] = MobileNetVaForSemanticSegmentation(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Any = model(lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
_lowercase : List[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase ( self) -> int:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : Union[str, Any] = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase_ : Optional[Any] = False
lowercase_ : List[Any] = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : int = MobileNetVaModelTester(self)
_lowercase : Tuple = MobileNetVaConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='MobileNetV2 does not output attentions')
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase , _lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Optional[Any] = model_class(lowerCamelCase)
_lowercase : Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : List[str] = [*signature.parameters.keys()]
_lowercase : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase):
_lowercase : Dict = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : str = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase))
_lowercase : str = outputs.hidden_states
_lowercase : Union[str, Any] = 16
self.assertEqual(len(lowerCamelCase), lowerCamelCase)
_lowercase , _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : str = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : int = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[str] = MobileNetVaModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
def UpperCamelCase_( ) -> Optional[int]:
_lowercase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
)
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : str = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(lowerCamelCase)
_lowercase : Union[str, Any] = self.default_image_processor
_lowercase : Tuple = prepare_img()
_lowercase : Union[str, Any] = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : List[Any] = model(**lowerCamelCase)
# verify the logits
_lowercase : Union[str, Any] = torch.Size((1, 10_01))
self.assertEqual(outputs.logits.shape, lowerCamelCase)
_lowercase : str = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5]).to(lowerCamelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
_lowercase : Union[str, Any] = model.to(lowerCamelCase)
_lowercase : int = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
_lowercase : Tuple = prepare_img()
_lowercase : Any = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : Tuple = model(**lowerCamelCase)
_lowercase : List[str] = outputs.logits
# verify the logits
_lowercase : Any = torch.Size((1, 21, 65, 65))
self.assertEqual(logits.shape, lowerCamelCase)
_lowercase : Optional[int] = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=lowerCamelCase, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], lowerCamelCase, atol=1E-4))
| 89 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
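# Worked example (added for illustration): the first Fibonacci number with
# 3 digits is F(12) = 144, so fibonacci_digits_index(3) == 12; Project Euler
# problem 25 asks for the first 1000-digit term, i.e. solution(1000).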
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 89 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
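# Example (added for illustration): for a symbolic tensor of static shape
# (None, 128), shape_list returns [tf.shape(t)[0], 128] -- concrete ints where
# the static shape is known and scalar tensors where it is not -- so downstream
# reshapes work in both eager and graph mode.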
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
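# Usage sketch (added for illustration, with made-up shapes):
#
#   x = tf.random.normal((2, 5, 8))
#   y = functional_layernorm(x, tf.ones(8), tf.zeros(8), epsilon=1e-5, axis=-1)
#
# y then has ~zero mean and ~unit variance along the last axis, matching
# torch.nn.functional.layer_norm(x, (8,), weight, bias) on the same values.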
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
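# Note (added for illustration): the returned mask is additive rather than
# boolean -- positions to attend to become 0 and masked positions become
# dtype.min -- so it can be added directly to raw attention scores before the
# softmax. E.g. a 2D mask [[1, 0]] comes back with values [[0, dtype.min]]
# broadcastable to [batch, num_heads, query_len, key_len].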
def check_embeddings_within_bounds(tensor, embed_dim, tensor_name="input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data) -> None:
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
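# Sketch of the chunking behaviour (hypothetical numbers): a ~150 kB list of
# layer names exceeds the 64512-byte HDF5 object-header limit, so it is split
# and stored under "layer_names0", "layer_names1", ... and reassembled by the
# matching loader below, which probes "%s%d" % (name, chunk_id) keys in order.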
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
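    # Note (added for illustration): the _LazyModule pattern keeps importing
    # `transformers` cheap -- the torch- and vision-dependent submodules listed
    # in _import_structure are only imported when one of their attributes is
    # first accessed, e.g.
    #   from transformers.models.conditional_detr import ConditionalDetrConfig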
| 89 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE : int = "sshleifer/student_marian_en_ro_6_1"
SCREAMING_SNAKE_CASE : str = "sshleifer/tiny-mbart"
@require_torch
class _lowerCamelCase( _a ):
def UpperCamelCase ( self, lowerCamelCase=False, lowerCamelCase=None, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.run_trainer(
eval_steps=1, max_len=12, model_name=lowerCamelCase, num_train_epochs=1, distributed=lowerCamelCase, extra_args_str=lowerCamelCase, predict_with_generate=lowerCamelCase, do_train=lowerCamelCase, do_eval=lowerCamelCase, do_predict=lowerCamelCase, )
_lowercase : List[str] = TrainerState.load_from_json(os.path.join(lowerCamelCase, 'trainer_state.json')).log_history
if not do_eval:
return
_lowercase : Dict = [log for log in logs if 'eval_loss' in log.keys()]
_lowercase : List[str] = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_lowercase : Optional[int] = eval_metrics[-1]
assert isinstance(last_step_stats['eval_bleu'], lowerCamelCase)
assert not math.isnan(float(last_step_stats['eval_loss'])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def UpperCamelCase ( self) -> str:
"""simple docstring"""
self.run_seqaseq_quick(distributed=lowerCamelCase)
@require_torch_multi_gpu
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=lowerCamelCase)
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self) -> str:
"""simple docstring"""
self.run_seqaseq_quick(distributed=lowerCamelCase, extra_args_str='--sharded_ddp simple')
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
self.run_seqaseq_quick(distributed=lowerCamelCase, extra_args_str='--sharded_ddp simple --fp16')
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self) -> int:
"""simple docstring"""
self.run_seqaseq_quick(distributed=lowerCamelCase, extra_args_str='--sharded_ddp zero_dp_2', predict_with_generate=lowerCamelCase)
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
self.run_seqaseq_quick(
distributed=lowerCamelCase, extra_args_str='--sharded_ddp zero_dp_2 --fp16', predict_with_generate=lowerCamelCase)
@require_apex
@require_torch_gpu
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=lowerCamelCase, extra_args_str='--fp16 --fp16_backend=apex')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=lowerCamelCase, extra_args_str='--fp16 --fp16_backend=apex')
@parameterized.expand(['base', 'low', 'high', 'mixed'])
@require_torch_multi_gpu
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Any = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
_lowercase : Tuple = experiments[experiment_id]
_lowercase : List[str] = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
_lowercase : List[Any] = 'Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**lowerCamelCase, extra_args_str=data['extra_args_str'])
_lowercase : List[str] = len(re.findall(lowerCamelCase, cl.err))
self.assertEqual(lowerCamelCase, data['n_matches'])
@slow
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = self.run_trainer(
eval_steps=2, max_len=1_28, model_name=lowerCamelCase, learning_rate=3E-4, num_train_epochs=10, distributed=lowerCamelCase, )
# Check metrics
_lowercase : Optional[Any] = TrainerState.load_from_json(os.path.join(lowerCamelCase, 'trainer_state.json')).log_history
_lowercase : Any = [log for log in logs if 'eval_loss' in log.keys()]
_lowercase : Union[str, Any] = eval_metrics[0]
_lowercase : int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['eval_bleu'], lowerCamelCase)
# test if do_predict saves generations and metrics
_lowercase : List[Any] = os.listdir(lowerCamelCase)
_lowercase : str = {os.path.basename(p) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def UpperCamelCase ( self) -> str:
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(lowerCamelCase) -> Tuple[int, int, float]:
_lowercase : Optional[Any] = '--skip_memory_metrics 0'
_lowercase : Any = self.run_trainer(
max_len=1_28, model_name=lowerCamelCase, learning_rate=3E-4, num_train_epochs=1, optim=lowerCamelCase, distributed=lowerCamelCase, extra_args_str=lowerCamelCase, do_eval=lowerCamelCase, do_predict=lowerCamelCase, n_gpus_to_use=1, )
# Check metrics
_lowercase : int = TrainerState.load_from_json(Path(lowerCamelCase, 'trainer_state.json')).log_history
_lowercase : str = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20)
_lowercase : Optional[int] = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20)
_lowercase : int = logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_lowercase , _lowercase , _lowercase : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
_lowercase , _lowercase , _lowercase : Dict = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
_lowercase : Union[str, Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_lowercase : Any = gpu_peak_mem_orig + gpu_alloc_mem_orig
_lowercase : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_lowercase : Tuple = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights
# that don't get quantized and remain in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes, and the diff in optimizer memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
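# A rough restatement of that arithmetic (hypothetical names, for illustration
# only -- the assertions below rely on the measured deltas instead):
#   quantized_params_m = 25                   # ~25M non-embedding params
#   adamw_mb = quantized_params_m * 8         # ~200MB at 8 bytes/param
#   bnb_mb = quantized_params_m * 2           # ~50MB at 2 bytes/param
#   expected_saving_mb = adamw_mb - bnb_mb    # ~150MB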
_lowercase : List[str] = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
lowerCamelCase, lowerCamelCase, 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''', )
self.assertGreater(
lowerCamelCase, lowerCamelCase, 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''', )
self.assertEqual(
lowerCamelCase, lowerCamelCase, F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = 3E-3, lowerCamelCase = "adafactor", lowerCamelCase = False, lowerCamelCase = None, lowerCamelCase = 0, lowerCamelCase = True, lowerCamelCase = True, lowerCamelCase = True, lowerCamelCase = True, lowerCamelCase = None, ) -> Any:
"""simple docstring"""
_lowercase : str = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
_lowercase : int = self.get_auto_remove_tmp_dir()
_lowercase : List[str] = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(lowerCamelCase)}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(lowerCamelCase)}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
_lowercase : str = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(lowerCamelCase)}
'''.split()
_lowercase : Dict = '\n --do_predict\n '.split()
_lowercase : Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_lowercase : Any = get_gpu_count()
_lowercase : List[Any] = get_torch_dist_unique_port()
_lowercase : str = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
_lowercase : List[Any] = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCamelCase, env=self.get_env())
else:
_lowercase : int = ['run_translation.py'] + args
with patch.object(lowerCamelCase, 'argv', lowerCamelCase):
main()
return output_dir
| 89 |
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c, used in
# the Casimir force formula F = (pi^2 * ℏ * c * A) / (240 * d^4)
SCREAMING_SNAKE_CASE : Union[str, Any] = 1.0_5457_1817E-34 # unit of ℏ : J * s
SCREAMING_SNAKE_CASE : int = 3E8 # unit of c : m * s^-1
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
if force == 0:
_lowercase : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowercase : List[Any] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowercase : List[Any] = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
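# A hedged usage sketch (values are approximate, not an authoritative doctest):
# solving for the force with force=0, area=4 m^2 and distance=0.03 m gives
#   UpperCamelCase_(0, 4, 0.03)  # -> {'force': ~6.4248e-21}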
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
SCREAMING_SNAKE_CASE : int = False
class _lowerCamelCase( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = 'A painting of a squirrel eating a burger '
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : str = pipe(
prompt=lowerCamelCase, generator=lowerCamelCase, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase)
_lowercase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCamelCase)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = generator.manual_seed(0)
_lowercase : Dict = pipe(
prompt=lowerCamelCase, generator=lowerCamelCase, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images
assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Any = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion', torch_dtype=torch.floataa)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = 'A painting of a squirrel eating a burger '
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : Optional[int] = pipe(
prompt=lowerCamelCase, generator=lowerCamelCase, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
_lowercase : List[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : List[str] = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 89 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('multiplicative_persistence() only accepts integral values' )
if num < 0:
raise ValueError('multiplicative_persistence() does not accept negative values' )
_lowercase : List[str] = 0
_lowercase : Optional[int] = str(lowerCamelCase_ )
while len(lowerCamelCase_ ) != 1:
_lowercase : Any = [int(i) for i in num_string]
_lowercase : List[Any] = 1
for i in range(0 , len(lowerCamelCase_ ) ):
total *= numbers[i]
_lowercase : Optional[Any] = str(lowerCamelCase_ )
steps += 1
return steps
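# Example (illustrative only): 39 -> 3*9=27 -> 2*7=14 -> 1*4=4 takes three
# multiplication steps, so the multiplicative persistence of 39 is 3.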
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('additive_persistence() only accepts integral values' )
if num < 0:
raise ValueError('additive_persistence() does not accept negative values' )
_lowercase : Optional[int] = 0
_lowercase : str = str(lowerCamelCase_ )
while len(lowerCamelCase_ ) != 1:
_lowercase : Dict = [int(i) for i in num_string]
_lowercase : Any = 0
for i in range(0 , len(lowerCamelCase_ ) ):
total += numbers[i]
_lowercase : Dict = str(lowerCamelCase_ )
steps += 1
return steps
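# Example (illustrative only): 199 -> 1+9+9=19 -> 1+9=10 -> 1+0=1 takes three
# addition steps, so the additive persistence of 199 is 3.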
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class _lowerCamelCase( _a ):
def __init__( self, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['bs4'])
super().__init__(**lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : str = []
_lowercase : List[str] = []
_lowercase : str = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_lowercase : Optional[int] = parent.find_all(child.name, recursive=lowerCamelCase)
xpath_tags.append(child.name)
xpath_subscripts.append(
0 if 1 == len(lowerCamelCase) else next(i for i, s in enumerate(lowerCamelCase, 1) if s is child))
_lowercase : str = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : int = BeautifulSoup(lowerCamelCase, 'html.parser')
_lowercase : List[str] = []
_lowercase : Dict = []
_lowercase : Any = []
for element in html_code.descendants:
if type(element) == bsa.element.NavigableString:
if type(element.parent) != bsa.element.Tag:
continue
_lowercase : str = html.unescape(element).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(lowerCamelCase)
_lowercase , _lowercase : Union[str, Any] = self.xpath_soup(lowerCamelCase)
stringaxtag_seq.append(lowerCamelCase)
stringaxsubs_seq.append(lowerCamelCase)
if len(lowerCamelCase) != len(lowerCamelCase):
raise ValueError('Number of doc strings and xtags does not correspond')
if len(lowerCamelCase) != len(lowerCamelCase):
raise ValueError('Number of doc strings and xsubs does not correspond')
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[Any] = ''
for tagname, subs in zip(lowerCamelCase, lowerCamelCase):
xpath += F'''/{tagname}'''
if subs != 0:
xpath += F'''[{subs}]'''
return xpath
def __call__( self, lowerCamelCase) -> BatchFeature:
"""simple docstring"""
_lowercase : Optional[int] = False
# Check that strings has a valid type
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Optional[Any] = True
elif isinstance(lowerCamelCase, (list, tuple)):
if len(lowerCamelCase) == 0 or isinstance(html_strings[0], lowerCamelCase):
_lowercase : Dict = True
if not valid_strings:
raise ValueError(
'HTML strings must be of type `str`, `List[str]` (batch of examples), '
F'''but is of type {type(lowerCamelCase)}.''')
_lowercase : Optional[Any] = bool(isinstance(lowerCamelCase, (list, tuple)) and (isinstance(html_strings[0], lowerCamelCase)))
if not is_batched:
_lowercase : List[Any] = [html_strings]
# Get nodes + xpaths
_lowercase : Any = []
_lowercase : int = []
for html_string in html_strings:
_lowercase , _lowercase , _lowercase : Optional[int] = self.get_three_from_single(lowerCamelCase)
nodes.append(lowerCamelCase)
_lowercase : Any = []
for node, tag_list, sub_list in zip(lowerCamelCase, lowerCamelCase, lowerCamelCase):
_lowercase : int = self.construct_xpath(lowerCamelCase, lowerCamelCase)
xpath_strings.append(lowerCamelCase)
xpaths.append(lowerCamelCase)
# return as Dict
_lowercase : Union[str, Any] = {'nodes': nodes, 'xpaths': xpaths}
_lowercase : Any = BatchFeature(data=lowerCamelCase, tensor_type=lowerCamelCase)
return encoded_inputs
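# A minimal usage sketch (hypothetical input; assumes the bs4 backend is installed):
#   feature_extractor = _lowerCamelCase()
#   encoding = feature_extractor('<html><body><p>Hello</p></body></html>')
#   encoding['nodes']   # -> [['Hello']]
#   encoding['xpaths']  # -> [['/html/body/p']]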
| 89 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
# initialize config
if "resnet-50" in model_name:
_lowercase : Union[str, Any] = ResNetConfig.from_pretrained('microsoft/resnet-50' )
elif "resnet-101" in model_name:
_lowercase : Optional[Any] = ResNetConfig.from_pretrained('microsoft/resnet-101' )
else:
raise ValueError('Model name should include either resnet-50 or resnet-101' )
_lowercase : Tuple = DetrConfig(use_timm_backbone=lowerCamelCase_ , backbone_config=lowerCamelCase_ )
# set label attributes
_lowercase : Any = 'panoptic' in model_name
if is_panoptic:
_lowercase : List[Any] = 250
else:
_lowercase : str = 91
_lowercase : List[Any] = 'huggingface/label-files'
_lowercase : Any = 'coco-detection-id2label.json'
_lowercase : Tuple = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : int = idalabel
_lowercase : Any = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowercase : List[str] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : str = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[Any] = val
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=False ) -> str:
_lowercase : Any = ''
if is_panoptic:
_lowercase : Optional[Any] = 'detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowercase : int = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase : Tuple = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : List[str] = in_proj_weight[:256, :]
_lowercase : Tuple = in_proj_bias[:256]
_lowercase : List[Any] = in_proj_weight[256:512, :]
_lowercase : Any = in_proj_bias[256:512]
_lowercase : int = in_proj_weight[-256:, :]
_lowercase : Optional[int] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowercase : str = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase : Optional[int] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : Union[str, Any] = in_proj_weight[:256, :]
_lowercase : Dict = in_proj_bias[:256]
_lowercase : Tuple = in_proj_weight[256:512, :]
_lowercase : Dict = in_proj_bias[256:512]
_lowercase : str = in_proj_weight[-256:, :]
_lowercase : Optional[int] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowercase : Tuple = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
_lowercase : Dict = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowercase : List[str] = in_proj_weight_cross_attn[:256, :]
_lowercase : Tuple = in_proj_bias_cross_attn[:256]
_lowercase : str = in_proj_weight_cross_attn[256:512, :]
_lowercase : Union[str, Any] = in_proj_bias_cross_attn[256:512]
_lowercase : List[Any] = in_proj_weight_cross_attn[-256:, :]
_lowercase : Dict = in_proj_bias_cross_attn[-256:]
def UpperCamelCase_( ) -> List[Any]:
_lowercase : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : str = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False ) -> List[Any]:
_lowercase , _lowercase : int = get_detr_config(lowerCamelCase_ )
# load original model from torch hub
_lowercase : int = {
'detr-resnet-50': 'detr_resnet50',
'detr-resnet-101': 'detr_resnet101',
}
logger.info(F'''Converting model {model_name}...''' )
_lowercase : Optional[Any] = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=lowerCamelCase_ ).eval()
_lowercase : str = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(lowerCamelCase_ ):
if is_panoptic:
_lowercase : str = 'detr.' + src
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase_ , is_panoptic=lowerCamelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowercase : List[Any] = 'detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
_lowercase : Tuple = state_dict.pop(lowerCamelCase_ )
_lowercase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
_lowercase : Optional[Any] = state_dict.pop(lowerCamelCase_ )
_lowercase : Union[str, Any] = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : List[str] = val
# finally, create HuggingFace model and load state dict
_lowercase : Optional[Any] = DetrForSegmentation(lowerCamelCase_ ) if is_panoptic else DetrForObjectDetection(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
# verify our conversion on an image
_lowercase : str = 'coco_panoptic' if is_panoptic else 'coco_detection'
_lowercase : Optional[int] = DetrImageProcessor(format=lowerCamelCase_ )
_lowercase : str = processor(images=prepare_img() , return_tensors='pt' )
_lowercase : Tuple = encoding['pixel_values']
_lowercase : int = detr(lowerCamelCase_ )
_lowercase : Tuple = model(lowerCamelCase_ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 1 |
from __future__ import annotations
from math import pi
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
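# A hedged usage sketch: with inductance=35e-6 H, frequency=1e3 Hz and
# reactance=0, the result is 2*pi*1e3*35e-6, i.e. roughly 0.2199 ohm:
#   UpperCamelCase_(35e-6, 1e3, 0)  # -> {'reactance': ~0.2199}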
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE : str = "scheduler_config.json"
class _lowerCamelCase( _a ):
lowercase_ : Any = 1
lowercase_ : Dict = 2
lowercase_ : Union[str, Any] = 3
lowercase_ : Tuple = 4
lowercase_ : Optional[Any] = 5
@dataclass
class _lowerCamelCase( _a ):
lowercase_ : jnp.ndarray
class _lowerCamelCase:
lowercase_ : Union[str, Any] = SCHEDULER_CONFIG_NAME
lowercase_ : str = ["""dtype"""]
lowercase_ : Dict = []
lowercase_ : int = True
@classmethod
def UpperCamelCase ( cls, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
_lowercase , _lowercase : Optional[int] = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase, subfolder=lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase, )
_lowercase , _lowercase : Tuple = cls.from_config(lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase)
if hasattr(lowerCamelCase, 'create_state') and getattr(lowerCamelCase, 'has_state', lowerCamelCase):
_lowercase : List[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, **lowerCamelCase) -> Any:
"""simple docstring"""
self.save_config(save_directory=lowerCamelCase, push_to_hub=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def UpperCamelCase ( cls) -> Any:
"""simple docstring"""
_lowercase : Any = list(set([cls.__name__] + cls._compatibles))
_lowercase : Dict = importlib.import_module(__name__.split('.')[0])
_lowercase : Any = [
getattr(lowerCamelCase, lowerCamelCase) for c in compatible_classes_str if hasattr(lowerCamelCase, lowerCamelCase)
]
return compatible_classes
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> jnp.ndarray:
assert len(lowerCamelCase_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCamelCase_ ) - x.ndim) ) , lowerCamelCase_ )
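# Example (illustrative): an array of shape (4,) is reshaped to (4, 1, 1) and then
# broadcast to the target shape, e.g.
#   broadcast_to_shape_from_left(jnp.ones(4), (4, 3, 2)).shape  # -> (4, 3, 2)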
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=0.9_99 , lowerCamelCase_=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(lowerCamelCase_ ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
_lowercase : List[Any] = []
for i in range(lowerCamelCase_ ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
@flax.struct.dataclass
class _lowerCamelCase:
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
@classmethod
def UpperCamelCase ( cls, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : int = scheduler.config
if config.trained_betas is not None:
_lowercase : str = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
elif config.beta_schedule == "linear":
_lowercase : List[Any] = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Dict = (
jnp.linspace(
config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Optional[int] = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''')
_lowercase : List[str] = 1.0 - betas
_lowercase : Union[str, Any] = jnp.cumprod(lowerCamelCase, axis=0)
return cls(
alphas=lowerCamelCase, betas=lowerCamelCase, alphas_cumprod=lowerCamelCase, )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : str = state.alphas_cumprod
_lowercase : str = alphas_cumprod[timesteps] ** 0.5
_lowercase : Optional[Any] = sqrt_alpha_prod.flatten()
_lowercase : Tuple = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
_lowercase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowercase : Optional[Any] = sqrt_one_minus_alpha_prod.flatten()
_lowercase : int = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase , _lowercase : Optional[int] = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_lowercase , _lowercase : Tuple = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 89 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _lowerCamelCase( _a ):
lowercase_ : Any = """M-CLIP"""
def __init__( self, lowerCamelCase=10_24, lowerCamelCase=7_68, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = transformerDimSize
_lowercase : Union[str, Any] = imageDimSize
super().__init__(**lowerCamelCase)
class _lowerCamelCase( _a ):
lowercase_ : Union[str, Any] = MCLIPConfig
def __init__( self, lowerCamelCase, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
super().__init__(lowerCamelCase, *lowerCamelCase, **lowerCamelCase)
_lowercase : Dict = XLMRobertaModel(lowerCamelCase)
_lowercase : Any = torch.nn.Linear(
in_features=config.transformerDimensions, out_features=config.numDims)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Tuple = self.transformer(input_ids=lowerCamelCase, attention_mask=lowerCamelCase)[0]
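# Masked mean pooling: zero out the embeddings of padding positions, then
# average over the number of real (non-padding) tokens in each sequence.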
_lowercase : Union[str, Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
return self.LinearTransformation(lowerCamelCase), embs
| 89 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> float:
if not nums:
raise ValueError('List is empty' )
return sum(lowerCamelCase_ ) / len(lowerCamelCase_ )
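# Example (illustrative only): the arithmetic mean of [3, 6, 9] is 18 / 3 = 6.0.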
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
import random
class _lowerCamelCase:
@staticmethod
def UpperCamelCase ( lowerCamelCase) -> tuple[list[int], list[int]]:
"""simple docstring"""
_lowercase : Optional[int] = [ord(i) for i in text]
_lowercase : str = []
_lowercase : Tuple = []
for i in plain:
_lowercase : List[Any] = random.randint(1, 3_00)
_lowercase : Union[str, Any] = (i + k) * k
cipher.append(lowerCamelCase)
key.append(lowerCamelCase)
return cipher, key
@staticmethod
def UpperCamelCase ( lowerCamelCase, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : List[Any] = []
for i in range(len(lowerCamelCase)):
_lowercase : str = int((cipher[i] - (key[i]) ** 2) / key[i])
plain.append(chr(lowerCamelCase))
return "".join(lowerCamelCase)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| 89 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase_( ) -> List[Any]:
_lowercase : int = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
_lowercase : Optional[Any] = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
_lowercase : Any = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
_lowercase : Optional[int] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 89 | 1 |
from __future__ import annotations
SCREAMING_SNAKE_CASE : List[Any] = tuple[int, int, int]
SCREAMING_SNAKE_CASE : Tuple = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
SCREAMING_SNAKE_CASE : Optional[int] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
SCREAMING_SNAKE_CASE : List[Any] = "EGZWVONAHDCLFQMSIPJBYUKXTR"
SCREAMING_SNAKE_CASE : int = "FOBHMDKEXQNRAULPGSJVTYICZW"
SCREAMING_SNAKE_CASE : Any = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
SCREAMING_SNAKE_CASE : Tuple = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
SCREAMING_SNAKE_CASE : List[Any] = "RMDJXFUWGISLHVTCQNKYPBEZOA"
SCREAMING_SNAKE_CASE : Dict = "SGLCPQWZHKXAREONTFBVIYJUDM"
SCREAMING_SNAKE_CASE : Dict = "HVSICLTYKQUBXDWAJZOMFGPREN"
SCREAMING_SNAKE_CASE : Union[str, Any] = "RZWQHFMVDBKICJLNTUXAGYPSOE"
SCREAMING_SNAKE_CASE : int = "LFKIJODBEGAMQPXVUHYSTCZRWN"
SCREAMING_SNAKE_CASE : List[Any] = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(lowerCamelCase_ ) )) < 3:
_lowercase : int = F'''Please use 3 unique rotors (not {unique_rotsel})'''
raise Exception(lowerCamelCase_ )
# Checks if rotor positions are valid
_lowercase , _lowercase , _lowercase : Dict = rotpos
if not 0 < rotorposa <= len(lowerCamelCase_ ):
_lowercase : Tuple = F'''First rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCamelCase_ )
if not 0 < rotorposa <= len(lowerCamelCase_ ):
_lowercase : List[Any] = F'''Second rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCamelCase_ )
if not 0 < rotorposa <= len(lowerCamelCase_ ):
_lowercase : int = F'''Third rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCamelCase_ )
# Validates string and returns dict
_lowercase : Any = _plugboard(lowerCamelCase_ )
return rotpos, rotsel, pbdict
def UpperCamelCase_( lowerCamelCase_ ) -> dict[str, str]:
# tests the input string if it
# a) is type string
# b) has even length (so pairs can be made)
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : Union[str, Any] = F'''Plugboard setting isn\'t type string ({type(lowerCamelCase_ )})'''
raise TypeError(lowerCamelCase_ )
elif len(lowerCamelCase_ ) % 2 != 0:
_lowercase : Optional[int] = F'''Odd number of symbols ({len(lowerCamelCase_ )})'''
raise Exception(lowerCamelCase_ )
elif pbstring == "":
return {}
_lowercase : str = pbstring.replace(' ' , '' )
# Checks if all characters are unique
_lowercase : Dict = set()
for i in pbstring:
if i not in abc:
_lowercase : List[str] = F'''\'{i}\' not in list of symbols'''
raise Exception(lowerCamelCase_ )
elif i in tmppbl:
_lowercase : Dict = F'''Duplicate symbol ({i})'''
raise Exception(lowerCamelCase_ )
else:
tmppbl.add(lowerCamelCase_ )
del tmppbl
# Created the dictionary
_lowercase : Union[str, Any] = {}
for j in range(0 , len(lowerCamelCase_ ) - 1 , 2 ):
_lowercase : Optional[int] = pbstring[j + 1]
_lowercase : Optional[Any] = pbstring[j]
return pb
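# A hedged sketch of the resulting mapping: _plugboard('POLAND') should yield the
# symmetric dict {'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'}.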
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = (rotora, rotora, rotora) , lowerCamelCase_ = "" , ) -> str:
_lowercase : Dict = text.upper()
_lowercase , _lowercase , _lowercase : Tuple = _validator(
lowerCamelCase_ , lowerCamelCase_ , plugb.upper() )
_lowercase , _lowercase , _lowercase : Any = rotor_position
_lowercase , _lowercase , _lowercase : Dict = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_lowercase : List[str] = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_lowercase : Dict = plugboard[symbol]
# rotor ra --------------------------
_lowercase : List[str] = abc.index(lowerCamelCase_ ) + rotorposa
_lowercase : Any = rotora[index % len(lowerCamelCase_ )]
# rotor rb --------------------------
_lowercase : List[Any] = abc.index(lowerCamelCase_ ) + rotorposa
_lowercase : Optional[int] = rotora[index % len(lowerCamelCase_ )]
# rotor rc --------------------------
_lowercase : List[Any] = abc.index(lowerCamelCase_ ) + rotorposa
_lowercase : Tuple = rotora[index % len(lowerCamelCase_ )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_lowercase : int = reflector[symbol]
# 2nd rotors
_lowercase : int = abc[rotora.index(lowerCamelCase_ ) - rotorposa]
_lowercase : int = abc[rotora.index(lowerCamelCase_ ) - rotorposa]
_lowercase : int = abc[rotora.index(lowerCamelCase_ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_lowercase : List[Any] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(lowerCamelCase_ ):
_lowercase : str = 0
rotorposa += 1
if rotorposa >= len(lowerCamelCase_ ):
_lowercase : Optional[Any] = 0
rotorposa += 1
if rotorposa >= len(lowerCamelCase_ ):
_lowercase : Optional[Any] = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(lowerCamelCase_ )
return "".join(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Tuple = "This is my Python script that emulates the Enigma machine from WWII."
SCREAMING_SNAKE_CASE : Union[str, Any] = (1, 1, 1)
SCREAMING_SNAKE_CASE : List[str] = "pictures"
SCREAMING_SNAKE_CASE : Any = (rotora, rotora, rotora)
SCREAMING_SNAKE_CASE : Tuple = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 89 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Optional[int] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase ( self, lowerCamelCase=0) -> str:
"""simple docstring"""
_lowercase : Optional[int] = np.random.RandomState(lowerCamelCase)
_lowercase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : Tuple = pipe(**lowerCamelCase).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Any = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_dummy_inputs()
_lowercase : Any = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
_lowercase : int = self.get_dummy_inputs()
_lowercase : Union[str, Any] = 3 * [inputs.pop('prompt')]
_lowercase : Union[str, Any] = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Tuple = text_inputs['input_ids']
_lowercase : Any = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
_lowercase : List[Any] = prompt_embeds
# forward
_lowercase : Union[str, Any] = pipe(**lowerCamelCase)
_lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = self.get_dummy_inputs()
_lowercase : Any = 3 * ['this is a negative prompt']
_lowercase : str = negative_prompt
_lowercase : Optional[int] = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : str = output.images[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : str = 3 * [inputs.pop('prompt')]
_lowercase : Optional[int] = []
for p in [prompt, negative_prompt]:
_lowercase : Tuple = pipe.tokenizer(
p, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Dict = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
_lowercase , _lowercase : str = embeds
# forward
_lowercase : Dict = pipe(**lowerCamelCase)
_lowercase : Tuple = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : int = ort.SessionOptions()
_lowercase : str = False
return options
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0)
_lowercase : Union[str, Any] = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : str = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'open neural network exchange'
_lowercase : List[Any] = np.random.RandomState(0)
_lowercase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = 'open neural network exchange'
_lowercase : str = np.random.RandomState(0)
_lowercase : Dict = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : List[Any] = 0
def test_callback_fn(lowerCamelCase, lowerCamelCase, lowerCamelCase) -> None:
_lowercase : List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_lowercase : Any = latents[0, -3:, -3:, -1]
_lowercase : Tuple = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_lowercase : List[Any] = latents[0, -3:, -3:, -1]
_lowercase : str = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
_lowercase : Any = False
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = 'Andromeda galaxy in a bottle'
_lowercase : str = np.random.RandomState(0)
pipe(
prompt=lowerCamelCase, num_inference_steps=5, guidance_scale=7.5, generator=lowerCamelCase, callback=lowerCamelCase, callback_steps=1, )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
assert isinstance(lowerCamelCase, lowerCamelCase)
assert pipe.safety_checker is None
_lowercase : Optional[int] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase)
_lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowercase : List[str] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
| 89 | 1 |
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
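# Accepted issuer (IIN) prefixes, commonly: 34/37 Amex, 35 JCB, 4 Visa, 5 Mastercard, 6 Discover.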
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
_lowercase : Tuple = credit_card_number
_lowercase : str = 0
_lowercase : List[Any] = len(lowerCamelCase_ ) - 2
for i in range(lowerCamelCase_ , -1 , -2 ):
# double the value of every second digit
_lowercase : List[str] = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
_lowercase : List[Any] = cc_number[:i] + str(lowerCamelCase_ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCamelCase_ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
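# Worked example (illustrative, not part of the original tests): for "18" the
# second digit from the right is doubled, 1 * 2 = 2, so total = 2 + 8 = 10 and
# 10 % 10 == 0 -> valid; for "19" total = 2 + 9 = 11 -> fails the Luhn check.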
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
_lowercase : Optional[Any] = F'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(F'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(lowerCamelCase_ ) <= 16:
print(F'''{error_message} of its length.''' )
return False
if not validate_initial_digits(lowerCamelCase_ ):
print(F'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(lowerCamelCase_ ):
print(F'''{error_message} it fails the Luhn check.''' )
return False
print(F'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : List[Any] = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = ["PoolFormerFeatureExtractor"]
SCREAMING_SNAKE_CASE : List[Any] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
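# A minimal sketch of the lazy-import idea used above (an illustrative
# simplification, not the library's actual _LazyModule implementation):
# the module resolves an exported name to its defining submodule on first
# attribute access, so heavy optional dependencies load only when needed.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        return getattr(submodule, attr)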
| 89 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : int = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
SCREAMING_SNAKE_CASE : List[Any] = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class _lowerCamelCase( _a ):
lowercase_ : Tuple = VOCAB_FILES_NAMES
lowercase_ : Any = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Any = ["""input_ids""", """attention_mask"""]
lowercase_ : List[str] = TaTokenizer
lowercase_ : List[int] = []
def __init__( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase="</s>", lowerCamelCase="<unk>", lowerCamelCase="<pad>", lowerCamelCase=1_00, lowerCamelCase=None, **lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
_lowercase : Tuple = [F'''<extra_id_{i}>''' for i in range(lowerCamelCase)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
_lowercase : Optional[int] = len(set(filter(lambda lowerCamelCase: bool('extra_id_' in str(lowerCamelCase)), lowerCamelCase)))
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens')
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, pad_token=lowerCamelCase, extra_ids=lowerCamelCase, additional_special_tokens=lowerCamelCase, **lowerCamelCase, )
_lowercase : Any = vocab_file
_lowercase : Dict = False if not self.vocab_file else True
_lowercase : Dict = extra_ids
@staticmethod
def UpperCamelCase ( lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
_lowercase : Dict = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.', lowerCamelCase, )
return max_model_length
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCamelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_lowercase : List[Any] = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase):
copyfile(self.vocab_file, lowerCamelCase)
logger.info(F'''Copy vocab file to {out_vocab_file}''')
return (out_vocab_file,)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
_lowercase : List[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
_lowercase : Tuple = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
_lowercase : List[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return list(
set(filter(lambda lowerCamelCase: bool(re.search(R'<extra_id_\d+>', lowerCamelCase)) is not None, self.additional_special_tokens)))
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return [self.convert_tokens_to_ids(lowerCamelCase) for token in self.get_sentinel_tokens()]
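# With the default extra_ids=100 this returns the ids of <extra_id_0> ... <extra_id_99>,
# the sentinel tokens T5 uses as mask placeholders for span corruption.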
| 89 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
SCREAMING_SNAKE_CASE : int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
SCREAMING_SNAKE_CASE : Dict = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print("\n".join(upper_files) + "\n")
SCREAMING_SNAKE_CASE : List[Any] = [file for file in filepaths if " " in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print("\n".join(space_files) + "\n")
SCREAMING_SNAKE_CASE : Any = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print("\n".join(hyphen_files) + "\n")
SCREAMING_SNAKE_CASE : str = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print("\n".join(nodir_files) + "\n")
SCREAMING_SNAKE_CASE : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 89 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
SCREAMING_SNAKE_CASE : Optional[Any] = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
SCREAMING_SNAKE_CASE : Dict = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=False ) -> Any:
_lowercase , _lowercase : Dict = create_model(
'HTSAT-tiny' , 'roberta' , lowerCamelCase_ , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=lowerCamelCase_ , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Tuple = {}
_lowercase : Optional[Any] = R'.*sequential.(\d+).*'
_lowercase : Union[str, Any] = R'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_lowercase : str = key.replace(lowerCamelCase_ , lowerCamelCase_ )
if re.match(lowerCamelCase_ , lowerCamelCase_ ):
# replace sequential layers with list
_lowercase : Dict = re.match(lowerCamelCase_ , lowerCamelCase_ ).group(1 )
_lowercase : List[Any] = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(lowerCamelCase_ )//3}.linear.''' )
elif re.match(lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : List[Any] = int(re.match(lowerCamelCase_ , lowerCamelCase_ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_lowercase : str = 1 if projecton_layer == 0 else 2
_lowercase : str = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
_lowercase : Union[str, Any] = value
_lowercase : Union[str, Any] = mixed_qkv.size(0 ) // 3
_lowercase : Any = mixed_qkv[:qkv_dim]
_lowercase : Tuple = mixed_qkv[qkv_dim : qkv_dim * 2]
_lowercase : str = mixed_qkv[qkv_dim * 2 :]
_lowercase : Optional[Any] = query_layer
_lowercase : Optional[Any] = key_layer
_lowercase : int = value_layer
else:
_lowercase : Optional[Any] = value
return model_state_dict
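# Illustrative trace of the renaming above: a checkpoint key such as
# "audio_branch.sequential.3.weight" first becomes
# "audio_model.audio_encoder.sequential.3.weight" via KEYS_TO_MODIFY_MAPPING,
# then "audio_model.audio_encoder.layers.1.linear.weight" (3 // 3 == 1).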
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> Optional[int]:
_lowercase , _lowercase : Tuple = init_clap(lowerCamelCase_ , enable_fusion=lowerCamelCase_ )
clap_model.eval()
_lowercase : Optional[int] = clap_model.state_dict()
_lowercase : Dict = rename_state_dict(lowerCamelCase_ )
_lowercase : Optional[int] = ClapConfig()
_lowercase : Tuple = enable_fusion
_lowercase : int = ClapModel(lowerCamelCase_ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
transformers_config.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 89 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Any:
_lowercase : str = 10
_lowercase : List[str] = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_lowercase : Union[str, Any] = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(lowerCamelCase_ ) ),
} , features=lowerCamelCase_ , )
return dataset
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : int = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE : str = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'file.txt'
_lowercase : List[str] = FILE_CONTENT
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
import bz2
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with bz2.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_lowercase : Optional[int] = bytes(lowerCamelCase_ , 'utf-8' )
with gzip.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with lz4.frame.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with py7zr.SevenZipFile(lowerCamelCase_ , 'w' ) as archive:
archive.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
import tarfile
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
import lzma
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_lowercase : int = bytes(lowerCamelCase_ , 'utf-8' )
with lzma.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
import zipfile
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_lowercase : Dict = bytes(lowerCamelCase_ , 'utf-8' )
with zstd.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
_lowercase : Optional[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
SCREAMING_SNAKE_CASE : Optional[Any] = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
SCREAMING_SNAKE_CASE : Tuple = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
SCREAMING_SNAKE_CASE : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[int] = datasets.Dataset.from_dict(lowerCamelCase_ )
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlite3.connect(lowerCamelCase_ ) ) as con:
_lowercase : Union[str, Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : Tuple = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : str = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
import bz2
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCamelCase_ , 'rb' ) as f:
_lowercase : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
_lowercase : Optional[Any] = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.int64(),
'col_3': pa.float64(),
} )
with open(lowerCamelCase_ , 'wb' ) as f:
_lowercase : List[str] = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_ )
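# transpose the row-oriented DATA (a list of dicts) into the column-oriented
# dict that pyarrow's Table.from_pydict expects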
_lowercase : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase_ ) )] for k in DATA[0]} , schema=lowerCamelCase_ )
writer.write_table(lowerCamelCase_ )
writer.close()
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : List[Any] = {'data': DATA}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : Optional[Any] = {'data': DATA_DICT_OF_LISTS}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
import gzip
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Optional[int] = ['0', '1', '2', '3']
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : str = ['0', '1', '2', '3']
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : List[Any] = ['0', '1', '2', '3']
_lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
_lowercase : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> int:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 89 | 1 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> list:
if len(lowerCamelCase_ ) == 0:
return []
_lowercase , _lowercase : Any = min(lowerCamelCase_ ), max(lowerCamelCase_ )
_lowercase : Tuple = int(max_value - min_value ) + 1
_lowercase : list[list] = [[] for _ in range(lowerCamelCase_ )]
for i in my_list:
buckets[int(i - min_value )].append(lowerCamelCase_ )
return [v for bucket in buckets for v in sorted(lowerCamelCase_ )]
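# Illustrative trace: for [4, 5, 3, 2, 1], min_value=1 and max_value=5 give five
# buckets; each value v lands in buckets[int(v - 1)], and concatenating the
# sorted buckets yields [1, 2, 3, 4, 5].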
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 89 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE : str = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
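# First access to an exported name (e.g. GPTBigCodeModel) triggers the import of
# modeling_gpt_bigcode above; until then the torch dependency stays unloaded.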
| 89 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class _lowerCamelCase:
lowercase_ : Tuple = MBartConfig
lowercase_ : Optional[Any] = {}
lowercase_ : List[Any] = """gelu"""
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=99, lowerCamelCase=32, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase=37, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=20, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=0, ) -> Optional[int]:
"""simple docstring"""
_lowercase : str = parent
_lowercase : str = batch_size
_lowercase : Union[str, Any] = seq_length
_lowercase : Dict = is_training
_lowercase : str = use_labels
_lowercase : List[Any] = vocab_size
_lowercase : Dict = hidden_size
_lowercase : Tuple = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Tuple = hidden_dropout_prob
_lowercase : int = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Optional[Any] = eos_token_id
_lowercase : Optional[int] = pad_token_id
_lowercase : Any = bos_token_id
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
_lowercase : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
_lowercase : Optional[int] = tf.concat([input_ids, eos_tensor], axis=1)
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : Dict = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
_lowercase : str = prepare_mbart_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase)
return config, inputs_dict
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : str = TFMBartModel(config=lowerCamelCase).get_decoder()
_lowercase : Union[str, Any] = inputs_dict['input_ids']
_lowercase : str = input_ids[:1, :]
_lowercase : Optional[int] = inputs_dict['attention_mask'][:1, :]
_lowercase : Tuple = inputs_dict['head_mask']
_lowercase : Tuple = 1
# first forward pass
_lowercase : Union[str, Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase)
_lowercase , _lowercase : Union[str, Any] = outputs.to_tuple()
_lowercase : Optional[int] = past_key_values[1]
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , ) -> Optional[Any]:
if attention_mask is None:
_lowercase : Dict = tf.cast(tf.math.not_equal(lowerCamelCase_ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
_lowercase : List[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
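# the decoder mask built above always attends to position 0 (the decoder start
# token) and masks only pad tokens in the remaining positions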
if head_mask is None:
_lowercase : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : Optional[int] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowercase_ : Optional[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowercase_ : Optional[int] = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase_ : Optional[Any] = True
lowercase_ : Union[str, Any] = False
lowercase_ : Dict = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[Any]:
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[str] = TFMBartModelTester(self)
_lowercase : List[Any] = ConfigTester(self, config_class=lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase)
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCamelCase( unittest.TestCase ):
lowercase_ : Tuple = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
lowercase_ : int = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
lowercase_ : Optional[Any] = """facebook/mbart-large-en-ro"""
@cached_property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
def UpperCamelCase ( self, **lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : str = self.translate_src_text(**lowerCamelCase)
self.assertListEqual(self.expected_text, lowerCamelCase)
def UpperCamelCase ( self, **lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : List[Any] = self.tokenizer(self.src_text, **lowerCamelCase, return_tensors='tf')
_lowercase : int = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
_lowercase : List[Any] = self.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase)
return generated_words
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 89 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=2, lowerCamelCase=99, lowerCamelCase=0, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase="last", lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=0, ) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : List[str] = seq_length
_lowercase : int = is_training
_lowercase : List[str] = use_input_lengths
_lowercase : int = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : Union[str, Any] = gelu_activation
_lowercase : List[str] = sinusoidal_embeddings
_lowercase : str = causal
_lowercase : Optional[int] = asm
_lowercase : Union[str, Any] = n_langs
_lowercase : List[Any] = vocab_size
_lowercase : Any = n_special
_lowercase : Any = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : List[str] = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : int = num_labels
_lowercase : Optional[int] = num_choices
_lowercase : Optional[Any] = summary_type
_lowercase : Optional[Any] = use_proj
_lowercase : int = scope
_lowercase : List[Any] = bos_token_id
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowercase : int = None
if self.use_input_lengths:
_lowercase : Dict = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
_lowercase : Tuple = None
_lowercase : int = None
_lowercase : int = None
if self.use_labels:
_lowercase : str = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase : Dict = ids_tensor([self.batch_size], 2).float()
_lowercase : Tuple = ids_tensor([self.batch_size], self.num_choices)
_lowercase : Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = XLMModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : str = model(lowerCamelCase, lengths=lowerCamelCase, langs=lowerCamelCase)
_lowercase : int = model(lowerCamelCase, langs=lowerCamelCase)
_lowercase : Any = model(lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[Any]:
"""simple docstring"""
_lowercase : Dict = XLMWithLMHeadModel(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> str:
"""simple docstring"""
_lowercase : Tuple = XLMForQuestionAnsweringSimple(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(lowerCamelCase)
_lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
_lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = XLMForQuestionAnswering(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase)
_lowercase : List[Any] = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, p_mask=lowerCamelCase, )
_lowercase : List[str] = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, )
((_lowercase) , ) : Optional[Any] = result_with_labels.to_tuple()
_lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
((_lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, ())
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = XLMForSequenceClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
_lowercase : Optional[int] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.num_labels
_lowercase : str = XLMForTokenClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Dict:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_choices
_lowercase : Optional[int] = XLMForMultipleChoice(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : int = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Optional[Any] = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : List[str] = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Dict = self.prepare_config_and_inputs()
( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) : Optional[Any] = config_and_inputs
_lowercase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, _a, unittest.TestCase ):
lowercase_ : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ : Optional[int] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase_ : Union[str, Any] = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=False) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_lowercase : Any = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase)
_lowercase : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase)
return inputs_dict
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Union[str, Any] = XLMModelTester(self)
_lowercase : List[str] = ConfigTester(self, config_class=lowerCamelCase, emb_dim=37)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> int:
"""simple docstring"""
self.assertIsInstance(lowerCamelCase, lowerCamelCase)
self.assertListEqual(
[isinstance(lowerCamelCase, lowerCamelCase) for iter_attentions in attentions], [True] * len(lowerCamelCase))
self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(lowerCamelCase):
# adds PAD dummy token
_lowercase : Dict = min_length + idx + 1
_lowercase : int = min_length + idx + 1
_lowercase : Dict = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(lowerCamelCase))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> List[Any]:
"""simple docstring"""
self.assertIsInstance(lowerCamelCase, lowerCamelCase)
self.assertListEqual(
[isinstance(lowerCamelCase, lowerCamelCase) for iter_hidden_states in hidden_states], [True] * len(lowerCamelCase), )
self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(lowerCamelCase):
# adds PAD dummy token
_lowercase : int = min_length + idx + 1
_lowercase : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(lowerCamelCase), )
@slow
def UpperCamelCase ( self) -> int:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Dict = XLMModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor([[14, 4_47]], dtype=torch.long, device=lowerCamelCase) # the president
_lowercase : Any = [14, 4_47] * 10  # "the president" repeated ten times
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_lowercase : str = model.generate(lowerCamelCase, do_sample=lowerCamelCase)
self.assertListEqual(output_ids[0].cpu().numpy().tolist(), lowerCamelCase)
| 89 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : Tuple = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : Optional[int] = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SCREAMING_SNAKE_CASE : Optional[Any] = "▁"
class _lowerCamelCase( _a ):
lowercase_ : int = VOCAB_FILES_NAMES
lowercase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : str = ["""input_ids""", """attention_mask"""]
lowercase_ : Dict = BarthezTokenizer
def __init__( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase="<s>", lowerCamelCase="</s>", lowerCamelCase="</s>", lowerCamelCase="<s>", lowerCamelCase="<unk>", lowerCamelCase="<pad>", lowerCamelCase="<mask>", **lowerCamelCase, ) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else mask_token
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, **lowerCamelCase, )
_lowercase : Tuple = vocab_file
_lowercase : Any = False if not self.vocab_file else True
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowercase : int = [self.cls_token_id]
_lowercase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
_lowercase : Optional[Any] = [self.sep_token_id]
_lowercase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCamelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_lowercase : Union[str, Any] = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase):
copyfile(self.vocab_file, lowerCamelCase)
return (out_vocab_file,)
| 89 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
@dataclass
class _lowerCamelCase:
lowercase_ : Optional[str] = field(
default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
lowercase_ : Optional[str] = field(
default="""tab_fact""", metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}, )
lowercase_ : int = field(
default=10_24, metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
lowercase_ : bool = field(
default=_a, metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """A csv or a json file containing the training data."""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """A csv or a json file containing the validation data."""} )
lowercase_ : Optional[str] = field(default=_a, metadata={"""help""": """A csv or a json file containing the test data."""} )
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
_lowercase : int = self.train_file.split('.')[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_lowercase : Tuple = self.validation_file.split('.')[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _lowerCamelCase:
lowercase_ : str = field(
default=_a, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
lowercase_ : str = field(
default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
lowercase_ : bool = field(
default=_a, metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
}, )
def UpperCamelCase_( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowercase , _lowercase , _lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowercase , _lowercase , _lowercase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowercase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
datasets.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_lowercase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowercase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowercase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowercase : Optional[Any] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_lowercase : Tuple = data_args.train_file.split('.' )[-1]
_lowercase : int = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_lowercase : Any = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_lowercase : str = load_dataset('csv' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowercase : Optional[int] = load_dataset('json' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowercase : Optional[Any] = raw_datasets['train'].features['label'].names
_lowercase : Any = len(lowerCamelCase_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_lowercase : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCamelCase_ , )
_lowercase : Tuple = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowercase : int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowercase : str = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowercase : List[Any] = {'Refused': 0, 'Entailed': 1}
_lowercase : Union[str, Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_lowercase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCamelCase_ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCamelCase_ ):
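# `table_text` encodes rows separated by newlines and cells separated by '#';
# the first row supplies the column headers of the resulting DataFrame.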
_lowercase : int = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_lowercase : Any = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_lowercase : List[Any] = examples['statement']
_lowercase : Optional[Any] = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_lowercase : Union[str, Any] = tokenizer(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ )
_lowercase : Any = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
_lowercase : str = raw_datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowercase : Any = raw_datasets['train']
if data_args.max_train_samples is not None:
_lowercase : str = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowercase : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowercase : List[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_lowercase : Optional[int] = raw_datasets['test']
if data_args.max_predict_samples is not None:
_lowercase : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCamelCase_ ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary mapping strings to floats.
def compute_metrics(lowerCamelCase_ ):
_lowercase : Dict = p.predictions[0] if isinstance(p.predictions , lowerCamelCase_ ) else p.predictions
_lowercase : Tuple = np.argmax(lowerCamelCase_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowercase : Any = default_data_collator
elif training_args.fpaa:
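# fp16 run: pad batches to a multiple of 8 so tensor shapes line up with Tensor Core tiles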
_lowercase : str = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 )
else:
_lowercase : Optional[Any] = None
# Initialize our Trainer
_lowercase : List[str] = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase_ , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
_lowercase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
_lowercase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowercase : Optional[Any] = last_checkpoint
_lowercase : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
_lowercase : List[Any] = train_result.metrics
_lowercase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
_lowercase : int = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCamelCase_ )
trainer.save_metrics('train' , lowerCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowercase : Tuple = trainer.evaluate(eval_dataset=lowerCamelCase_ )
_lowercase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
_lowercase : Optional[int] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('eval' , lowerCamelCase_ )
trainer.save_metrics('eval' , lowerCamelCase_ )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` column because it contains -1 and Trainer won't like that.
_lowercase : Any = predict_dataset.remove_columns('label' )
_lowercase : Optional[Any] = trainer.predict(lowerCamelCase_ , metric_key_prefix='predict' ).predictions
_lowercase : Union[str, Any] = np.argmax(lowerCamelCase_ , axis=1 )
_lowercase : Dict = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(lowerCamelCase_ ):
_lowercase : List[str] = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
_lowercase : str = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 89 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger()
def UpperCamelCase_( ) -> Optional[Any]:
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument('-f' )
_lowercase : List[str] = parser.parse_args()
return args.f
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> None:
"""simple docstring"""
_lowercase : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : int = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0, 'run_glue_deebert.py')
with patch.object(lowerCamelCase, 'argv', lowerCamelCase):
_lowercase : Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase, 0.6_6_6)
@slow
@require_torch_non_multi_gpu
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : str = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(lowerCamelCase)
_lowercase : Optional[int] = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase)
_lowercase : Optional[Any] = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase)
| 89 |
from maths.prime_factors import prime_factors
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : str = F'''Input value of [number={number}] must be an integer'''
raise TypeError(lowerCamelCase_ )
if number < 1:
raise ValueError('Input must be a positive integer' )
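# The sign flips with the parity of the prime-factor count returned by
# `prime_factors`; note that no separate square-free check is performed here.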
return -1 if len(prime_factors(lowerCamelCase_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=0.2, lowerCamelCase=0.2) -> str:
"""simple docstring"""
_lowercase : str = bp_numa
_lowercase : List[str] = bp_numa
_lowercase : List[str] = bp_numa
_lowercase : Union[str, Any] = conva_get[:2]
_lowercase : Any = conva_get[2]
_lowercase : Dict = size_pa
_lowercase : Dict = rate_w
_lowercase : int = rate_t
_lowercase : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0], self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_lowercase : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa) + 0.5)
_lowercase : Any = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa) + 0.5)
_lowercase : Dict = -2 * np.random.rand(self.conva[1]) + 1
_lowercase : Dict = -2 * np.random.rand(self.num_bpa) + 1
_lowercase : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : List[str] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(lowerCamelCase, 'wb') as f:
pickle.dump(lowerCamelCase, lowerCamelCase)
print(F'''Model saved: {save_path}''')
@classmethod
def UpperCamelCase ( cls, lowerCamelCase) -> List[str]:
"""simple docstring"""
with open(lowerCamelCase, 'rb') as f:
_lowercase : Union[str, Any] = pickle.load(lowerCamelCase) # noqa: S301
_lowercase : int = model_dic.get('conv1')
conv_get.append(model_dic.get('step_conv1'))
_lowercase : Optional[int] = model_dic.get('size_pooling1')
_lowercase : Union[str, Any] = model_dic.get('num_bp1')
_lowercase : Union[str, Any] = model_dic.get('num_bp2')
_lowercase : Tuple = model_dic.get('num_bp3')
_lowercase : int = model_dic.get('rate_weight')
_lowercase : List[str] = model_dic.get('rate_thre')
# create model instance
_lowercase : List[Any] = CNN(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase)
# modify model parameter
_lowercase : Optional[Any] = model_dic.get('w_conv1')
_lowercase : int = model_dic.get('wkj')
_lowercase : Dict = model_dic.get('vji')
_lowercase : Optional[Any] = model_dic.get('thre_conv1')
_lowercase : List[Any] = model_dic.get('thre_bp2')
_lowercase : Any = model_dic.get('thre_bp3')
return conv_ins
def UpperCamelCase ( self, lowerCamelCase) -> Any:
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x))
def UpperCamelCase ( self, lowerCamelCase) -> List[Any]:
"""simple docstring"""
return round(lowerCamelCase, 3)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
_lowercase : Tuple = convs[0]
_lowercase : Any = convs[1]
_lowercase : Optional[Any] = np.shape(lowerCamelCase)[0]
# get the data slice of original image data, data_focus
_lowercase : str = []
for i_focus in range(0, size_data - size_conv + 1, lowerCamelCase):
for j_focus in range(0, size_data - size_conv + 1, lowerCamelCase):
_lowercase : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase)
# calculate the feature map of every single kernel, and save it as a list of matrices
_lowercase : List[Any] = []
_lowercase : str = int((size_data - size_conv) / conv_step + 1)
for i_map in range(lowerCamelCase):
_lowercase : Any = []
for i_focus in range(len(lowerCamelCase)):
_lowercase : Dict = (
np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase))
_lowercase : Optional[Any] = np.asmatrix(lowerCamelCase).reshape(
lowerCamelCase, lowerCamelCase)
data_featuremap.append(lowerCamelCase)
# expanding the data slice to one dimension
_lowercase : str = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase))
_lowercase : Any = np.asarray(lowerCamelCase)
return focus_list, data_featuremap
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase="average_pool") -> Any:
"""simple docstring"""
_lowercase : Tuple = len(featuremaps[0])
_lowercase : List[Any] = int(size_map / size_pooling)
_lowercase : Optional[int] = []
for i_map in range(len(lowerCamelCase)):
_lowercase : Union[str, Any] = featuremaps[i_map]
_lowercase : Union[str, Any] = []
for i_focus in range(0, lowerCamelCase, lowerCamelCase):
for j_focus in range(0, lowerCamelCase, lowerCamelCase):
_lowercase : List[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase))
_lowercase : Tuple = np.asmatrix(lowerCamelCase).reshape(lowerCamelCase, lowerCamelCase)
featuremap_pooled.append(lowerCamelCase)
return featuremap_pooled
def UpperCamelCase ( self, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = []
for i in range(len(lowerCamelCase)):
_lowercase : Optional[int] = np.shape(data[i])
_lowercase : Optional[Any] = data[i].reshape(1, shapes[0] * shapes[1])
_lowercase : int = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase)
_lowercase : Tuple = np.asarray(lowerCamelCase)
return data_expanded
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Optional[int] = np.asarray(lowerCamelCase)
_lowercase : Optional[int] = np.shape(lowerCamelCase)
_lowercase : Any = data_mat.reshape(1, shapes[0] * shapes[1])
return data_expanded
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = []
_lowercase : Dict = 0
for i_map in range(lowerCamelCase):
_lowercase : Tuple = np.ones((size_map, size_map))
for i in range(0, lowerCamelCase, lowerCamelCase):
for j in range(0, lowerCamelCase, lowerCamelCase):
_lowercase : Optional[int] = pd_pool[
i_pool
]
_lowercase : Dict = i_pool + 1
_lowercase : List[Any] = np.multiply(
lowerCamelCase, np.multiply(out_map[i_map], (1 - out_map[i_map])))
pd_all.append(lowerCamelCase)
return pd_all
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=bool) -> List[str]:
"""simple docstring"""
print('----------------------Start Training-------------------------')
print((' - - Shape: Train_Data ', np.shape(lowerCamelCase)))
print((' - - Shape: Teach_Data ', np.shape(lowerCamelCase)))
_lowercase : Dict = 0
_lowercase : Tuple = []
_lowercase : List[Any] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
_lowercase : List[Any] = 0
print(F'''-------------Learning Time {rp}--------------''')
for p in range(len(lowerCamelCase)):
# print('------------Learning Image: %d--------------'%p)
_lowercase : Optional[int] = np.asmatrix(datas_train[p])
_lowercase : str = np.asarray(datas_teach[p])
_lowercase , _lowercase = self.convolute(
lowerCamelCase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
_lowercase : Optional[int] = self.pooling(lowerCamelCase, self.size_poolinga)
_lowercase : List[str] = np.shape(lowerCamelCase)
_lowercase : Optional[Any] = self._expand(lowerCamelCase)
_lowercase : Dict = data_bp_input
_lowercase : Dict = np.dot(lowerCamelCase, self.vji.T) - self.thre_bpa
_lowercase : Dict = self.sig(lowerCamelCase)
_lowercase : int = np.dot(lowerCamelCase, self.wkj.T) - self.thre_bpa
_lowercase : Tuple = self.sig(lowerCamelCase)
# --------------Model Learning ------------------------
# calculate error and gradient---------------
_lowercase : Dict = np.multiply(
(data_teach - bp_outa), np.multiply(lowerCamelCase, (1 - bp_outa)))
_lowercase : Optional[int] = np.multiply(
np.dot(lowerCamelCase, self.wkj), np.multiply(lowerCamelCase, (1 - bp_outa)))
_lowercase : Optional[Any] = np.dot(lowerCamelCase, self.vji)
_lowercase : List[Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
_lowercase : int = pd_conva_pooled.T.getA().tolist()
_lowercase : Dict = self._calculate_gradient_from_pool(
lowerCamelCase, lowerCamelCase, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_lowercase : Tuple = self._expand_mat(pd_conva_all[k_conv])
_lowercase : Optional[int] = self.rate_weight * np.dot(lowerCamelCase, lowerCamelCase)
_lowercase : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_lowercase : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
_lowercase : str = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_lowercase : Optional[int] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_lowercase : Dict = self.thre_bpa - pd_k_all * self.rate_thre
_lowercase : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the summed error over all images
_lowercase : Tuple = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_lowercase : List[Any] = rp + 1
_lowercase : Any = error_count / patterns
all_mse.append(lowerCamelCase)
def draw_error():
_lowercase : List[Any] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(lowerCamelCase, '+-')
plt.plot(lowerCamelCase, 'r--')
plt.xlabel('Learning Times')
plt.ylabel('All_mse')
plt.grid(lowerCamelCase, alpha=0.5)
plt.show()
print('------------------Training Completed---------------------')
print((' - - Training epoch: ', rp, F''' - - Mse: {mse:.6f}'''))
if draw_e:
draw_error()
return mse
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Optional[int] = []
print('-------------------Start Testing-------------------------')
print((' - - Shape: Test_Data ', np.shape(lowerCamelCase)))
for p in range(len(lowerCamelCase)):
_lowercase : str = np.asmatrix(datas_test[p])
_lowercase , _lowercase = self.convolute(
lowerCamelCase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
_lowercase : Optional[Any] = self.pooling(lowerCamelCase, self.size_poolinga)
_lowercase : Union[str, Any] = self._expand(lowerCamelCase)
_lowercase : Optional[int] = data_bp_input
_lowercase : Tuple = bp_outa * self.vji.T - self.thre_bpa
_lowercase : Any = self.sig(lowerCamelCase)
_lowercase : str = bp_outa * self.wkj.T - self.thre_bpa
_lowercase : Any = self.sig(lowerCamelCase)
produce_out.extend(bp_outa.getA().tolist())
_lowercase : Union[str, Any] = [list(map(self.do_round, lowerCamelCase)) for each in produce_out]
return np.asarray(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
_lowercase : Dict = np.asmatrix(lowerCamelCase)
_lowercase , _lowercase = self.convolute(
lowerCamelCase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
_lowercase : Optional[Any] = self.pooling(lowerCamelCase, self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 89 |
from __future__ import annotations
from typing import Any
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 0) -> None:
"""simple docstring"""
_lowercase , _lowercase = row, column
_lowercase : Any = [[default_value for c in range(lowerCamelCase)] for r in range(lowerCamelCase)]
def __str__( self) -> str:
"""simple docstring"""
_lowercase : Tuple = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
_lowercase : str = 0
for row_vector in self.array:
for obj in row_vector:
_lowercase : Optional[int] = max(lowerCamelCase, len(str(lowerCamelCase)))
_lowercase : List[str] = F'''%{max_element_length}s'''
# Make string and return
def single_line(lowerCamelCase) -> str:
nonlocal string_format_identifier
_lowercase : Union[str, Any] = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(lowerCamelCase) for row_vector in self.array)
return s
def __repr__( self) -> str:
"""simple docstring"""
return str(self)
def UpperCamelCase ( self, lowerCamelCase) -> bool:
"""simple docstring"""
if not (isinstance(lowerCamelCase, (list, tuple)) and len(lowerCamelCase) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self, lowerCamelCase) -> Any:
"""simple docstring"""
assert self.validate_indices(lowerCamelCase)
return self.array[loc[0]][loc[1]]
def __setitem__( self, lowerCamelCase, lowerCamelCase) -> None:
"""simple docstring"""
assert self.validate_indices(lowerCamelCase)
_lowercase : Optional[Any] = value
def __add__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
assert isinstance(lowerCamelCase, lowerCamelCase)
assert self.row == another.row and self.column == another.column
# Add
_lowercase : Any = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : int = self[r, c] + another[r, c]
return result
def __neg__( self) -> Matrix:
"""simple docstring"""
_lowercase : List[Any] = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : List[str] = -self[r, c]
return result
def __sub__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
if isinstance(lowerCamelCase, (int, float)): # Scalar multiplication
_lowercase : Dict = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : Union[str, Any] = self[r, c] * another
return result
elif isinstance(lowerCamelCase, lowerCamelCase): # Matrix multiplication
assert self.column == another.row
_lowercase : str = Matrix(self.row, another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_lowercase : Tuple = F'''Unsupported type given for another ({type(lowerCamelCase)})'''
raise TypeError(lowerCamelCase)
def UpperCamelCase ( self) -> Matrix:
"""simple docstring"""
_lowercase : List[Any] = Matrix(self.column, self.row)
for r in range(self.row):
for c in range(self.column):
_lowercase : Union[str, Any] = self[r, c]
return result
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
assert isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
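# Sherman-Morrison: if this matrix already holds A^(-1), then
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# which is valid only when the denominator below is non-zero.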
_lowercase : Dict = v.transpose()
_lowercase : Any = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def UpperCamelCase_( ) -> None:
# a^(-1)
_lowercase : Optional[int] = Matrix(3 , 3 , 0 )
for i in range(3 ):
_lowercase : int = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
_lowercase : Dict = Matrix(3 , 1 , 0 )
_lowercase , _lowercase , _lowercase = 1, 2, -3
_lowercase : List[Any] = Matrix(3 , 1 , 0 )
_lowercase , _lowercase , _lowercase = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase_ , lowerCamelCase_ )}''' )
def UpperCamelCase_( ) -> None:
import doctest
doctest.testmod()
testa()
| 89 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
@add_end_docstrings(_a )
class _lowerCamelCase( _a ):
def __init__( self, **lowerCamelCase) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCamelCase)
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''')
requires_backends(self, 'vision')
self.check_model_type(lowerCamelCase)
def __call__( self, lowerCamelCase, lowerCamelCase = None, **lowerCamelCase, ) -> Dict:
"""simple docstring"""
if "text_queries" in kwargs:
_lowercase : List[str] = kwargs.pop('text_queries')
if isinstance(lowerCamelCase, (str, Image.Image)):
_lowercase : Tuple = {'image': image, 'candidate_labels': candidate_labels}
else:
_lowercase : Optional[Any] = image
_lowercase : Union[str, Any] = super().__call__(lowerCamelCase, **lowerCamelCase)
return results
def UpperCamelCase ( self, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = {}
if "threshold" in kwargs:
_lowercase : Optional[int] = kwargs['threshold']
if "top_k" in kwargs:
_lowercase : str = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = load_image(inputs['image'])
_lowercase : Union[str, Any] = inputs['candidate_labels']
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Tuple = candidate_labels.split(',')
_lowercase : List[Any] = torch.tensor([[image.height, image.width]], dtype=torch.intaa)
for i, candidate_label in enumerate(lowerCamelCase):
_lowercase : List[Any] = self.tokenizer(lowerCamelCase, return_tensors=self.framework)
_lowercase : List[str] = self.image_processor(lowerCamelCase, return_tensors=self.framework)
yield {
"is_last": i == len(lowerCamelCase) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = model_inputs.pop('target_size')
_lowercase : Tuple = model_inputs.pop('candidate_label')
_lowercase : Dict = model_inputs.pop('is_last')
_lowercase : int = self.model(**lowerCamelCase)
_lowercase : int = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0.1, lowerCamelCase=None) -> Any:
"""simple docstring"""
_lowercase : Dict = []
for model_output in model_outputs:
_lowercase : int = model_output['candidate_label']
_lowercase : Optional[int] = BaseModelOutput(lowerCamelCase)
_lowercase : int = self.image_processor.post_process_object_detection(
outputs=lowerCamelCase, threshold=lowerCamelCase, target_sizes=model_output['target_size'])[0]
for index in outputs["scores"].nonzero():
_lowercase : Optional[int] = outputs['scores'][index].item()
_lowercase : Union[str, Any] = self._get_bounding_box(outputs['boxes'][index][0])
_lowercase : int = {'score': score, 'label': label, 'box': box}
results.append(lowerCamelCase)
_lowercase : str = sorted(lowerCamelCase, key=lambda lowerCamelCase: x["score"], reverse=lowerCamelCase)
if top_k:
_lowercase : int = results[:top_k]
return results
def UpperCamelCase ( self, lowerCamelCase) -> Dict[str, int]:
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.')
_lowercase , _lowercase , _lowercase , _lowercase = box.int().tolist()
_lowercase : Tuple = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 89 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = int(lowerCamelCase_ )
_lowercase , _lowercase , _lowercase = t // 3600, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
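# e.g. format_time(3725) -> '1:02:05'; format_time(125) -> '02:05'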
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=300 ) -> Dict:
# docstyle-ignore
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : int = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_lowercase : Any = F'''{elt:.6f}''' if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else str(lowerCamelCase_ )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _lowerCamelCase:
lowercase_ : str = 5
lowercase_ : str = 0.2
def __init__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = 3_00, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = total
_lowercase : Optional[int] = '' if prefix is None else prefix
_lowercase : Tuple = leave
_lowercase : str = parent
_lowercase : str = width
_lowercase : List[Any] = None
_lowercase : List[str] = None
_lowercase : Tuple = None
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, lowerCamelCase = None) -> Dict:
"""simple docstring"""
_lowercase : Any = value
if comment is not None:
_lowercase : Union[str, Any] = comment
if self.last_value is None:
_lowercase : Dict = time.time()
_lowercase : Tuple = value
_lowercase : str = None
_lowercase : Optional[int] = self.warmup
_lowercase : Optional[Any] = 1
self.update_bar(lowerCamelCase)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
if self.first_calls > 0:
self.first_calls -= 1
_lowercase : List[str] = time.time()
_lowercase : Tuple = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_lowercase : Dict = self.elapsed_time / (value - self.start_value)
else:
_lowercase : int = None
if value >= self.total:
_lowercase : Dict = self.total
_lowercase : List[str] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_lowercase : Optional[int] = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCamelCase)
_lowercase : int = value
_lowercase : Tuple = current_time
if self.average_time_per_item is None:
_lowercase : str = 1
else:
_lowercase : int = max(int(self.update_every / self.average_time_per_item), 1)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[Any] = ' ' * (len(str(self.total)) - len(str(lowerCamelCase))) + str(lowerCamelCase)
if self.elapsed_time is None:
_lowercase : int = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
_lowercase : Union[str, Any] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
else:
_lowercase : Union[str, Any] = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
F''' {format_time(self.predicted_remaining)}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]'''
self.display()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_lowercase : Optional[Any] = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(''))
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase=None) -> int:
"""simple docstring"""
super().__init__(lowerCamelCase)
_lowercase : Optional[Any] = None if column_names is None else [column_names]
_lowercase : Any = None
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_lowercase : Dict = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
if self.inner_table is None:
_lowercase : Dict = [list(values.keys()), list(values.values())]
else:
_lowercase : Tuple = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCamelCase)
_lowercase : str = columns
self.inner_table.append([values[c] for c in columns])
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=3_00) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[str] = NotebookProgressBar(lowerCamelCase, prefix=lowerCamelCase, parent=self, width=lowerCamelCase)
return self.child_bar
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[Any] = None
self.display()
class _lowerCamelCase( _a ):
def __init__( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = None
_lowercase : Dict = None
_lowercase : Dict = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Dict = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
_lowercase : Dict = 0
_lowercase : Tuple = 0
_lowercase : int = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss')
_lowercase : Union[str, Any] = NotebookTrainingTracker(state.max_steps, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = int(state.epoch) if int(state.epoch) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
_lowercase : str = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
"""simple docstring"""
if not has_length(lowerCamelCase):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowercase : Optional[int] = self.training_tracker.add_child(len(lowerCamelCase))
else:
_lowercase : Optional[int] = NotebookProgressBar(len(lowerCamelCase))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowercase : Any = None
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowercase : Dict = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy

_lowercase : List[Any] = state.global_step
self.training_tracker.write_line(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[str]:
"""simple docstring"""
if self.training_tracker is not None:
_lowercase : Tuple = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history):
if "loss" in log:
_lowercase : int = log['loss']
break
if self.first_column == "Epoch":
_lowercase : Union[str, Any] = int(state.epoch)
else:
_lowercase : Optional[Any] = state.global_step
_lowercase : str = 'eval'
for k in metrics:
if k.endswith('_loss'):
_lowercase : str = re.sub(R'\_loss$', '', lowerCamelCase)
_lowercase : Tuple = metrics.pop('total_flos', lowerCamelCase)
_lowercase : List[str] = metrics.pop('epoch', lowerCamelCase)
_lowercase : List[Any] = metrics.pop(F'''{metric_key_prefix}_runtime''', lowerCamelCase)
_lowercase : Dict = metrics.pop(F'''{metric_key_prefix}_samples_per_second''', lowerCamelCase)
_lowercase : Tuple = metrics.pop(F'''{metric_key_prefix}_steps_per_second''', lowerCamelCase)
_lowercase : List[str] = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', lowerCamelCase)
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
_lowercase : Union[str, Any] = v
else:
_lowercase : Optional[Any] = k.split('_')
_lowercase : Optional[int] = ' '.join([part.capitalize() for part in splits[1:]])
_lowercase : Tuple = v
self.training_tracker.write_line(lowerCamelCase)
self.training_tracker.remove_child()
_lowercase : str = None
# Evaluation takes a long time so we should force the next update.
_lowercase : Optional[Any] = True
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
self.training_tracker.update(
state.global_step, comment=F'''Epoch {int(state.epoch)}/{state.num_train_epochs}''', force_update=lowerCamelCase)
_lowercase : Any = None
| 89 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=False ) -> Tuple:
_lowercase : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_lowercase : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
_lowercase : Dict = ''
else:
_lowercase : Union[str, Any] = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase : Union[str, Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_lowercase : int = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
_lowercase : List[Any] = in_proj_bias[: config.hidden_size]
_lowercase : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase : Tuple = in_proj_weight[
-config.hidden_size :, :
]
_lowercase : List[str] = in_proj_bias[-config.hidden_size :]
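# Self-contained sketch of the slicing above: timm fuses q, k and v into a single
# (3*hidden, hidden) projection, which is split row-wise into three equal blocks.
# Sizes here are toy values, not the real DeiT dimensions.
_hidden = 4
_fused = torch.randn(3 * _hidden, _hidden)
_q, _k, _v = _fused[:_hidden, :], _fused[_hidden : 2 * _hidden, :], _fused[-_hidden:, :]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)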
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : Any = dct.pop(lowerCamelCase_ )
_lowercase : int = val
def UpperCamelCase_( ) -> Optional[int]:
_lowercase : str = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : Optional[int] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
_lowercase : Tuple = DeiTConfig()
# all deit models have fine-tuned heads
_lowercase : List[Any] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_lowercase : int = 1000
_lowercase : List[str] = 'huggingface/label-files'
_lowercase : List[str] = 'imagenet-1k-id2label.json'
_lowercase : Optional[Any] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : Tuple = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : List[str] = idalabel
_lowercase : Tuple = {v: k for k, v in idalabel.items()}
_lowercase : Optional[Any] = int(deit_name[-6:-4] )
_lowercase : Union[str, Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
_lowercase : List[Any] = 192
_lowercase : Union[str, Any] = 768
_lowercase : Union[str, Any] = 12
_lowercase : List[str] = 3
elif deit_name[9:].startswith('small' ):
_lowercase : Tuple = 384
_lowercase : List[str] = 1536
_lowercase : List[Any] = 12
_lowercase : List[Any] = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
_lowercase : List[Any] = 1024
_lowercase : Tuple = 4096
_lowercase : Union[str, Any] = 24
_lowercase : Optional[Any] = 16
# load original model from timm
_lowercase : Union[str, Any] = timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowercase : List[str] = timm_model.state_dict()
_lowercase : List[Any] = create_rename_keys(lowerCamelCase_ , lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# load HuggingFace model
_lowercase : Dict = DeiTForImageClassificationWithTeacher(lowerCamelCase_ ).eval()
model.load_state_dict(lowerCamelCase_ )
# Check outputs on an image, prepared by DeiTImageProcessor
_lowercase : str = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_lowercase : Optional[Any] = DeiTImageProcessor(size=lowerCamelCase_ , crop_size=config.image_size )
_lowercase : List[str] = image_processor(images=prepare_img() , return_tensors='pt' )
_lowercase : Union[str, Any] = encoding['pixel_values']
_lowercase : Tuple = model(lowerCamelCase_ )
_lowercase : Optional[int] = timm_model(lowerCamelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase_ , outputs.logits , atol=1e-3 )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 89 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
_lowercase : Tuple = True if 'large' in model_name or 'huge' in model_name else False
_lowercase : Any = True if 'large' in model_name or 'huge' in model_name else False
_lowercase : Dict = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_lowercase : Any = [3, 3, 3, 3]
_lowercase : Any = [5, 5, 5, 5]
elif "fl4" in model_name:
_lowercase : Dict = [4, 4, 4, 4]
_lowercase : Tuple = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_lowercase : str = [3, 3, 3, 3]
if "lrf" in model_name:
_lowercase : Optional[int] = [3, 3, 3, 3]
else:
_lowercase : Dict = [2, 2, 2, 2]
if "tiny" in model_name:
_lowercase : List[str] = 96
elif "small" in model_name:
_lowercase : Dict = 96
elif "base" in model_name:
_lowercase : Optional[int] = 128
elif "large" in model_name:
_lowercase : List[Any] = 192
elif "xlarge" in model_name:
_lowercase : Optional[Any] = 256
elif "huge" in model_name:
_lowercase : Dict = 352
# set label information
_lowercase : int = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
_lowercase : str = 'imagenet-22k-id2label.json'
else:
_lowercase : Tuple = 'imagenet-1k-id2label.json'
_lowercase : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : Any = {v: k for k, v in idalabel.items()}
_lowercase : Optional[Any] = FocalNetConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , focal_levels=lowerCamelCase_ , focal_windows=lowerCamelCase_ , use_conv_embed=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , use_post_layernorm=lowerCamelCase_ , use_layerscale=lowerCamelCase_ , )
return config
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
if "patch_embed.proj" in name:
_lowercase : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : str = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
_lowercase : Any = 'encoder.' + name
if "encoder.layers" in name:
_lowercase : int = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
_lowercase : Tuple = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
_lowercase : str = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_lowercase : List[str] = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_lowercase : int = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_lowercase : Any = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
_lowercase : Any = 'layernorm.weight'
if name == "norm.bias":
_lowercase : Tuple = 'layernorm.bias'
if "head" in name:
_lowercase : Optional[int] = name.replace('head' , 'classifier' )
else:
_lowercase : Optional[int] = 'focalnet.' + name
return name
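# Self-contained sketch of the renaming rules above applied to one hypothetical
# checkpoint key; it restates just the replacement steps that fire for this key.
_key = 'layers.0.blocks.1.modulation.f.weight'
_key = 'encoder.' + _key
_key = _key.replace('encoder.layers', 'encoder.stages')
_key = _key.replace('blocks', 'layers')
_key = _key.replace('modulation.f', 'modulation.projection_in')
_key = 'focalnet.' + _key
assert _key == 'focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight'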
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> str:
# fmt: off
_lowercase : Dict = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
_lowercase : Dict = model_name_to_url[model_name]
print('Checkpoint URL: ' , lowerCamelCase_ )
_lowercase : List[str] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[int] = val
_lowercase : Union[str, Any] = get_focalnet_config(lowerCamelCase_ )
_lowercase : Optional[Any] = FocalNetForImageClassification(lowerCamelCase_ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase_ )
# verify conversion
_lowercase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : Any = BitImageProcessor(
do_resize=lowerCamelCase_ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase_ , crop_size=224 , do_normalize=lowerCamelCase_ , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ , )
_lowercase : List[str] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
_lowercase : List[Any] = processor(images=lowerCamelCase_ , return_tensors='pt' )
_lowercase : str = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
_lowercase : List[str] = image_transforms(lowerCamelCase_ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase_ , atol=1e-4 )
_lowercase : Dict = model(**lowerCamelCase_ )
_lowercase : int = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_lowercase : Optional[Any] = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
_lowercase : int = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
_lowercase : str = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
_lowercase : Any = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
_lowercase : List[Any] = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
_lowercase : int = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 1 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
assert column_title.isupper()
_lowercase : Optional[Any] = 0
_lowercase : Any = len(lowerCamelCase_ ) - 1
_lowercase : Union[str, Any] = 0
while index >= 0:
_lowercase : Optional[int] = (ord(column_title[index] ) - 64) * pow(26 , lowerCamelCase_ )
answer += value
power += 1
index -= 1
return answer
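# Worked example of the base-26 decoding above, restated with explicit names:
# "AB" -> 1 * 26**1 + 2 * 26**0 = 28.
_answer, _power = 0, 0
for _ch in reversed('AB'):
    _answer += (ord(_ch) - 64) * pow(26, _power)
    _power += 1
assert _answer == 28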
if __name__ == "__main__":
from doctest import testmod
testmod()
| 89 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
lowercase_ : Any = """deta"""
lowercase_ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self, lowerCamelCase=None, lowerCamelCase=9_00, lowerCamelCase=20_48, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=True, lowerCamelCase=3_00, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, **lowerCamelCase, ) -> Any:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
_lowercase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
else:
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Dict = backbone_config.pop('model_type')
_lowercase : int = CONFIG_MAPPING[backbone_model_type]
_lowercase : Union[str, Any] = config_class.from_dict(lowerCamelCase)
_lowercase : Union[str, Any] = backbone_config
_lowercase : Any = num_queries
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Union[str, Any] = d_model
_lowercase : Optional[int] = encoder_ffn_dim
_lowercase : Optional[int] = encoder_layers
_lowercase : Optional[Any] = encoder_attention_heads
_lowercase : Optional[Any] = decoder_ffn_dim
_lowercase : Dict = decoder_layers
_lowercase : Tuple = decoder_attention_heads
_lowercase : Union[str, Any] = dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : int = activation_dropout
_lowercase : Tuple = activation_function
_lowercase : List[Any] = init_std
_lowercase : Union[str, Any] = init_xavier_std
_lowercase : int = encoder_layerdrop
_lowercase : Optional[int] = auxiliary_loss
_lowercase : Dict = position_embedding_type
# deformable attributes
_lowercase : Any = num_feature_levels
_lowercase : str = encoder_n_points
_lowercase : Any = decoder_n_points
_lowercase : List[str] = two_stage
_lowercase : Dict = two_stage_num_proposals
_lowercase : Any = with_box_refine
_lowercase : List[Any] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
_lowercase : List[Any] = class_cost
_lowercase : Optional[int] = bbox_cost
_lowercase : str = giou_cost
# Loss coefficients
_lowercase : Optional[int] = mask_loss_coefficient
_lowercase : int = dice_loss_coefficient
_lowercase : List[Any] = bbox_loss_coefficient
_lowercase : Optional[Any] = giou_loss_coefficient
_lowercase : str = eos_coefficient
_lowercase : int = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.d_model
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = copy.deepcopy(self.__dict__)
_lowercase : Optional[int] = self.backbone_config.to_dict()
_lowercase : Optional[Any] = self.__class__.model_type
return output
| 89 | 1 |
import numpy as np
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> np.ndarray:
return np.where(vector > 0 , lowerCamelCase_ , (alpha * (np.exp(lowerCamelCase_ ) - 1)) )
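# Quick numeric check of the ELU rule above (alpha assumed to be 1.0): negatives
# saturate toward -alpha, non-negatives pass through unchanged.
_v = np.array([-1.0, 0.0, 2.0])
_out = np.where(_v > 0, _v, 1.0 * (np.exp(_v) - 1))
# _out is approximately [-0.632, 0.0, 2.0]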
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
from __future__ import annotations
import numpy as np
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
return np.maximum(0 , lowerCamelCase_ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 89 | 1 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> float:
_lowercase : int = sorted(numsa + numsa )
_lowercase , _lowercase : List[str] = divmod(len(lowerCamelCase_ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
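# Worked example of the merge-then-index logic above: for an even combined length
# the median is the mean of the two middle elements.
_merged = sorted([1, 3] + [2, 4])
_div, _mod = divmod(len(_merged), 2)
assert (_merged[_div] if _mod == 1 else (_merged[_div] + _merged[_div - 1]) / 2) == 2.5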
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : Union[str, Any] = [float(x) for x in input("Enter the elements of first array: ").split()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [float(x) for x in input("Enter the elements of second array: ").split()]
print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
| 89 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
# Initialise PyTorch model
_lowercase : Optional[int] = TaConfig.from_json_file(lowerCamelCase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowercase : Union[str, Any] = TaForConditionalGeneration(lowerCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 89 | 1 |
from collections import defaultdict
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> bool:
_lowercase : Any = first_str.lower().strip()
_lowercase : List[str] = second_str.lower().strip()
# Remove whitespace
_lowercase : str = first_str.replace(' ' , '' )
_lowercase : List[Any] = second_str.replace(' ' , '' )
# Strings of different lengths are not anagrams
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
return False
# Default values for count should be 0
_lowercase : defaultdict[str, int] = defaultdict(lowerCamelCase_ )
    # For each character position, increment the count for the first
    # string's character and decrement it for the second's
for i in range(len(lowerCamelCase_ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
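# Self-contained check of the counting trick above: one pass increments for the
# first string and decrements for the second, so anagrams end with all zeros.
_count = defaultdict(int)
for _a, _b in zip('silent', 'listen'):
    _count[_a] += 1
    _count[_b] -= 1
assert all(_v == 0 for _v in _count.values())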
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE : List[Any] = input("Enter the first string ").strip()
SCREAMING_SNAKE_CASE : Any = input("Enter the second string ").strip()
SCREAMING_SNAKE_CASE : int = check_anagrams(input_a, input_b)
print(F"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 89 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if n == 1 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return 0
elif n == 2:
return 1
else:
_lowercase : List[str] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Tuple = 0
_lowercase : List[str] = 2
while digits < n:
index += 1
_lowercase : Optional[int] = len(str(fibonacci(lowerCamelCase_ ) ) )
return index
def UpperCamelCase_( lowerCamelCase_ = 1000 ) -> int:
return fibonacci_digits_index(lowerCamelCase_ )
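# Sanity example for the search above: F(12) = 144 is the first Fibonacci number
# with three digits, so an input of 3 should yield index 12 (explicit names here).
_seq, _idx = [0, 1], 1
while len(str(_seq[-1])) < 3:
    _seq.append(_seq[-1] + _seq[-2])
    _idx += 1
assert _idx == 12 and _seq[-1] == 144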
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 89 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : Optional[Any] = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
SCREAMING_SNAKE_CASE : Optional[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Any = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 1 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
print(F'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(lowerCamelCase_ ):
print(F'''{i}\t\t{d}''' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
for j in range(lowerCamelCase_ ):
_lowercase , _lowercase , _lowercase : Dict = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
return True
return False
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> list[float]:
_lowercase : Optional[int] = [float('inf' )] * vertex_count
_lowercase : str = 0.0
for _ in range(vertex_count - 1 ):
for j in range(lowerCamelCase_ ):
_lowercase , _lowercase , _lowercase : Any = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
_lowercase : int = distance[u] + w
_lowercase : Dict = check_negative_cycle(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if negative_cycle_exists:
raise Exception('Negative cycle found' )
return distance
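# Tiny worked example of the relaxation loop above, with explicit names and the
# same edge format ({'src', 'dst', 'weight'}): 3 vertices, source 0.
_edges = [
    {'src': 0, 'dst': 1, 'weight': 4},
    {'src': 0, 'dst': 2, 'weight': 7},
    {'src': 1, 'dst': 2, 'weight': 2},
]
_dist = [float('inf')] * 3
_dist[0] = 0.0
for _ in range(2):  # V - 1 passes
    for _e in _edges:
        if _dist[_e['src']] + _e['weight'] < _dist[_e['dst']]:
            _dist[_e['dst']] = _dist[_e['src']] + _e['weight']
assert _dist == [0.0, 4, 6]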
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : List[str] = int(input("Enter number of vertices: ").strip())
SCREAMING_SNAKE_CASE : Union[str, Any] = int(input("Enter number of edges: ").strip())
SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
SCREAMING_SNAKE_CASE : List[Any] = {"src": src, "dst": dest, "weight": weight}
SCREAMING_SNAKE_CASE : Any = int(input("\nEnter shortest path source:").strip())
SCREAMING_SNAKE_CASE : Optional[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 89 |
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar), the speed of light c, the value of
# pi, and the Casimir-force function
SCREAMING_SNAKE_CASE : Union[str, Any] = 1.0_5457_1817E-34 # unit of ℏ : J * s
SCREAMING_SNAKE_CASE : int = 3E8 # unit of c : m * s^-1
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
if force == 0:
_lowercase : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowercase : List[Any] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowercase : List[Any] = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
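# Numeric sanity check of the closed form above with hypothetical plate values:
# F = (hbar * c * pi^2 * A) / (240 * d^4), here A = 4e-2 m^2 and d = 1e-6 m.
_force = (1.054571817e-34 * 3e8 * pi**2 * 4e-2) / (240 * (1e-6) ** 4)
# _force is on the order of 5e-5 N for this area and separation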
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Optional[Any] = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('multiplicative_persistence() only accepts integral values' )
if num < 0:
raise ValueError('multiplicative_persistence() does not accept negative values' )
_lowercase : List[str] = 0
_lowercase : Optional[int] = str(lowerCamelCase_ )
while len(lowerCamelCase_ ) != 1:
_lowercase : Any = [int(lowerCamelCase_ ) for i in num_string]
_lowercase : List[Any] = 1
for i in range(0 , len(lowerCamelCase_ ) ):
total *= numbers[i]
_lowercase : Optional[Any] = str(lowerCamelCase_ )
steps += 1
return steps
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('additive_persistence() only accepts integral values' )
if num < 0:
raise ValueError('additive_persistence() does not accept negative values' )
_lowercase : Optional[int] = 0
_lowercase : str = str(lowerCamelCase_ )
while len(lowerCamelCase_ ) != 1:
_lowercase : Dict = [int(lowerCamelCase_ ) for i in num_string]
_lowercase : Any = 0
for i in range(0 , len(lowerCamelCase_ ) ):
total += numbers[i]
_lowercase : Dict = str(lowerCamelCase_ )
steps += 1
return steps
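# Worked example of the multiplicative notion above, with explicit names:
# 39 -> 27 -> 14 -> 4 takes three steps, so its persistence is 3.
_n, _steps = 39, 0
while _n >= 10:
    _prod = 1
    for _d in str(_n):
        _prod *= int(_d)
    _n, _steps = _prod, _steps + 1
assert _steps == 3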
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger("transformers.models.speecht5")
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
hf_model.apply_weight_norm()
_lowercase : Tuple = checkpoint['input_conv.weight_g']
_lowercase : Any = checkpoint['input_conv.weight_v']
_lowercase : Tuple = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
_lowercase : Any = checkpoint[F'''upsamples.{i}.1.weight_g''']
_lowercase : Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
_lowercase : Optional[Any] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_lowercase : Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
_lowercase : str = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
_lowercase : Dict = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
_lowercase : Tuple = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
_lowercase : str = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
_lowercase : Tuple = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
_lowercase : Union[str, Any] = checkpoint['output_conv.1.weight_g']
_lowercase : Dict = checkpoint['output_conv.1.weight_v']
_lowercase : Tuple = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , ) -> Tuple:
if config_path is not None:
_lowercase : Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase_ )
else:
_lowercase : str = SpeechTaHifiGanConfig()
_lowercase : Any = SpeechTaHifiGan(lowerCamelCase_ )
_lowercase : Optional[int] = torch.load(lowerCamelCase_ )
load_weights(orig_checkpoint['model']['generator'] , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : str = np.load(lowerCamelCase_ )
_lowercase : List[str] = stats[0].reshape(-1 )
_lowercase : List[str] = stats[1].reshape(-1 )
_lowercase : List[Any] = torch.from_numpy(lowerCamelCase_ ).float()
_lowercase : int = torch.from_numpy(lowerCamelCase_ ).float()
model.save_pretrained(lowerCamelCase_ )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 89 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
# initialize config
if "resnet-50" in model_name:
_lowercase : Union[str, Any] = ResNetConfig.from_pretrained('microsoft/resnet-50' )
elif "resnet-101" in model_name:
_lowercase : Optional[Any] = ResNetConfig.from_pretrained('microsoft/resnet-101' )
else:
        raise ValueError('Model name should include either resnet-50 or resnet-101' )
_lowercase : Tuple = DetrConfig(use_timm_backbone=lowerCamelCase_ , backbone_config=lowerCamelCase_ )
# set label attributes
_lowercase : Any = 'panoptic' in model_name
if is_panoptic:
_lowercase : List[Any] = 250
else:
_lowercase : str = 91
_lowercase : List[Any] = 'huggingface/label-files'
_lowercase : Any = 'coco-detection-id2label.json'
_lowercase : Tuple = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : int = idalabel
_lowercase : Any = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowercase : List[str] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : str = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[Any] = val
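# Minimal sketch of the pop-and-reassign renaming used throughout this script,
# shown on a toy state dict (tensor values replaced by ints for brevity).
_sd = {'query_embed.weight': 1, 'input_proj.bias': 2}
for _src, _dst in [
    ('query_embed.weight', 'query_position_embeddings.weight'),
    ('input_proj.bias', 'input_projection.bias'),
]:
    _sd[_dst] = _sd.pop(_src)
assert sorted(_sd) == ['input_projection.bias', 'query_position_embeddings.weight']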
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=False ) -> str:
_lowercase : Any = ''
if is_panoptic:
_lowercase : Optional[Any] = 'detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowercase : int = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase : Tuple = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : List[str] = in_proj_weight[:256, :]
_lowercase : Tuple = in_proj_bias[:256]
_lowercase : List[Any] = in_proj_weight[256:512, :]
_lowercase : Any = in_proj_bias[256:512]
_lowercase : int = in_proj_weight[-256:, :]
_lowercase : Optional[int] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowercase : str = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase : Optional[int] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : Union[str, Any] = in_proj_weight[:256, :]
_lowercase : Dict = in_proj_bias[:256]
_lowercase : Tuple = in_proj_weight[256:512, :]
_lowercase : Dict = in_proj_bias[256:512]
_lowercase : str = in_proj_weight[-256:, :]
_lowercase : Optional[int] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowercase : Tuple = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
_lowercase : Dict = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowercase : List[str] = in_proj_weight_cross_attn[:256, :]
_lowercase : Tuple = in_proj_bias_cross_attn[:256]
_lowercase : str = in_proj_weight_cross_attn[256:512, :]
_lowercase : Union[str, Any] = in_proj_bias_cross_attn[256:512]
_lowercase : List[Any] = in_proj_weight_cross_attn[-256:, :]
_lowercase : Dict = in_proj_bias_cross_attn[-256:]
def UpperCamelCase_( ) -> List[Any]:
_lowercase : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : str = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False ) -> List[Any]:
_lowercase , _lowercase : int = get_detr_config(lowerCamelCase_ )
# load original model from torch hub
_lowercase : int = {
'detr-resnet-50': 'detr_resnet50',
'detr-resnet-101': 'detr_resnet101',
}
logger.info(F'''Converting model {model_name}...''' )
_lowercase : Optional[Any] = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=lowerCamelCase_ ).eval()
_lowercase : str = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(lowerCamelCase_ ):
if is_panoptic:
_lowercase : str = 'detr.' + src
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase_ , is_panoptic=lowerCamelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowercase : List[Any] = 'detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
_lowercase : Tuple = state_dict.pop(lowerCamelCase_ )
_lowercase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
_lowercase : Optional[Any] = state_dict.pop(lowerCamelCase_ )
_lowercase : Union[str, Any] = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : List[str] = val
# finally, create HuggingFace model and load state dict
_lowercase : Optional[Any] = DetrForSegmentation(lowerCamelCase_ ) if is_panoptic else DetrForObjectDetection(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
# verify our conversion on an image
_lowercase : str = 'coco_panoptic' if is_panoptic else 'coco_detection'
_lowercase : Optional[int] = DetrImageProcessor(format=lowerCamelCase_ )
_lowercase : str = processor(images=prepare_img() , return_tensors='pt' )
_lowercase : Tuple = encoding['pixel_values']
_lowercase : int = detr(lowerCamelCase_ )
_lowercase : Tuple = model(lowerCamelCase_ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None ) -> Optional[Any]:
_lowercase : Dict = None
if token is not None:
_lowercase : int = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
_lowercase : Dict = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_lowercase : Optional[Any] = requests.get(lowerCamelCase_ , headers=lowerCamelCase_ ).json()
_lowercase : str = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
_lowercase : int = math.ceil((result['total_count'] - 100) / 100 )
for i in range(lowerCamelCase_ ):
_lowercase : List[str] = requests.get(url + F'''&page={i + 2}''' , headers=lowerCamelCase_ ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
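# The pagination arithmetic above in isolation: with 100 results per page and the
# first page already fetched, ceil((total - 100) / 100) extra pages remain.
assert math.ceil((250 - 100) / 100) == 2  # pages 2 and 3 still to fetch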
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None ) -> int:
_lowercase : Optional[int] = None
if token is not None:
_lowercase : Union[str, Any] = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
    _lowercase : Dict = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
_lowercase : List[Any] = requests.get(lowerCamelCase_ , headers=lowerCamelCase_ ).json()
_lowercase : Any = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
_lowercase : Dict = math.ceil((result['total_count'] - 100) / 100 )
for i in range(lowerCamelCase_ ):
_lowercase : Union[str, Any] = requests.get(url + F'''&page={i + 2}''' , headers=lowerCamelCase_ ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : List[Any] = None
if token is not None:
_lowercase : str = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
_lowercase : Dict = requests.get(lowerCamelCase_ , headers=lowerCamelCase_ , allow_redirects=lowerCamelCase_ )
_lowercase : Union[str, Any] = result.headers['Location']
_lowercase : Dict = requests.get(lowerCamelCase_ , allow_redirects=lowerCamelCase_ )
_lowercase : Tuple = os.path.join(lowerCamelCase_ , F'''{artifact_name}.zip''' )
with open(lowerCamelCase_ , 'wb' ) as fp:
fp.write(response.content )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None ) -> List[str]:
_lowercase : List[str] = []
_lowercase : List[Any] = []
_lowercase : Optional[Any] = None
with zipfile.ZipFile(lowerCamelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(lowerCamelCase_ ) as f:
for line in f:
_lowercase : str = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_lowercase : Dict = line[: line.index(': ' )]
_lowercase : Union[str, Any] = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
_lowercase : Union[str, Any] = line[len('FAILED ' ) :]
failed_tests.append(lowerCamelCase_ )
elif filename == "job_name.txt":
_lowercase : List[Any] = line
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(lowerCamelCase_ )} for `errors` '''
F'''and {len(lowerCamelCase_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
' problem.' )
_lowercase : Dict = None
if job_name and job_links:
_lowercase : Tuple = job_links.get(lowerCamelCase_ , lowerCamelCase_ )
# A list with elements of the form (line of error, error, failed test)
_lowercase : List[str] = [x + [y] + [job_link] for x, y in zip(lowerCamelCase_ , lowerCamelCase_ )]
return result
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None ) -> str:
_lowercase : Optional[int] = []
_lowercase : Union[str, Any] = [os.path.join(lowerCamelCase_ , lowerCamelCase_ ) for p in os.listdir(lowerCamelCase_ ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(lowerCamelCase_ , job_links=lowerCamelCase_ ) )
return errors
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None ) -> List[Any]:
_lowercase : int = Counter()
counter.update([x[1] for x in logs] )
_lowercase : Dict = counter.most_common()
_lowercase : Dict = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_lowercase : Dict = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
_lowercase : Union[str, Any] = dict(sorted(r.items() , key=lambda lowerCamelCase_ : item[1]["count"] , reverse=lowerCamelCase_ ) )
return r
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[int] = test.split('::' )[0]
if test.startswith('tests/models/' ):
_lowercase : Tuple = test.split('/' )[2]
else:
_lowercase : str = None
return test
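# Illustrative mapping (example node ids, not from the original file): this helper
# extracts the model folder from a pytest node id, e.g.
#   "tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_x" -> "albert"
#   "tests/test_configuration_common.py::ConfigTester::test_x"             -> None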
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None ) -> Optional[Any]:
_lowercase : Dict = [(x[0], x[1], get_model(x[2] )) for x in logs]
_lowercase : Dict = [x for x in logs if x[2] is not None]
_lowercase : Optional[int] = {x[2] for x in logs}
_lowercase : Optional[Any] = {}
for test in tests:
_lowercase : Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_lowercase : Tuple = counter.most_common()
_lowercase : List[str] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_lowercase : Union[str, Any] = sum(error_counts.values() )
if n_errors > 0:
_lowercase : List[str] = {'count': n_errors, 'errors': error_counts}
_lowercase : Optional[Any] = dict(sorted(r.items() , key=lambda lowerCamelCase_ : item[1]["count"] , reverse=lowerCamelCase_ ) )
return r
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : int = '| no. | error | status |'
_lowercase : int = '|-:|:-|:-|'
_lowercase : Union[str, Any] = [header, sep]
for error in reduced_by_error:
_lowercase : str = reduced_by_error[error]['count']
_lowercase : Dict = F'''| {count} | {error[:100]} | |'''
lines.append(lowerCamelCase_ )
return "\n".join(lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : str = '| model | no. of errors | major error | count |'
_lowercase : Tuple = '|-:|-:|-:|-:|'
_lowercase : List[Any] = [header, sep]
for model in reduced_by_model:
_lowercase : str = reduced_by_model[model]['count']
_lowercase , _lowercase : Dict = list(reduced_by_model[model]['errors'].items() )[0]
_lowercase : Dict = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(lowerCamelCase_ )
return "\n".join(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
SCREAMING_SNAKE_CASE : str = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
SCREAMING_SNAKE_CASE : Tuple = get_job_links(args.workflow_run_id, token=args.token)
SCREAMING_SNAKE_CASE : Dict = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
SCREAMING_SNAKE_CASE : Union[str, Any] = k.find(" / ")
SCREAMING_SNAKE_CASE : Union[str, Any] = k[index + len(" / ") :]
SCREAMING_SNAKE_CASE : int = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
SCREAMING_SNAKE_CASE : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
SCREAMING_SNAKE_CASE : Any = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
SCREAMING_SNAKE_CASE : str = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
SCREAMING_SNAKE_CASE : Optional[Any] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
SCREAMING_SNAKE_CASE : Optional[Any] = reduce_by_error(errors)
SCREAMING_SNAKE_CASE : Tuple = reduce_by_model(errors)
SCREAMING_SNAKE_CASE : Dict = make_github_table(reduced_by_error)
SCREAMING_SNAKE_CASE : List[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 89 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE : str = "scheduler_config.json"
class _lowerCamelCase( _a ):
lowercase_ : Any = 1
lowercase_ : Dict = 2
lowercase_ : Union[str, Any] = 3
lowercase_ : Tuple = 4
lowercase_ : Optional[Any] = 5
@dataclass
class _lowerCamelCase( _a ):
lowercase_ : jnp.ndarray
class _lowerCamelCase:
lowercase_ : Union[str, Any] = SCHEDULER_CONFIG_NAME
lowercase_ : str = ["""dtype"""]
lowercase_ : Dict = []
lowercase_ : int = True
@classmethod
def UpperCamelCase ( cls, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
_lowercase , _lowercase : Optional[int] = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase, subfolder=lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase, )
_lowercase , _lowercase : Tuple = cls.from_config(lowerCamelCase, return_unused_kwargs=lowerCamelCase, **lowerCamelCase)
if hasattr(lowerCamelCase, 'create_state') and getattr(lowerCamelCase, 'has_state', lowerCamelCase):
_lowercase : List[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, **lowerCamelCase) -> Any:
"""simple docstring"""
self.save_config(save_directory=lowerCamelCase, push_to_hub=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def UpperCamelCase ( cls) -> Any:
"""simple docstring"""
_lowercase : Any = list(set([cls.__name__] + cls._compatibles))
_lowercase : Dict = importlib.import_module(__name__.split('.')[0])
_lowercase : Any = [
getattr(lowerCamelCase, lowerCamelCase) for c in compatible_classes_str if hasattr(lowerCamelCase, lowerCamelCase)
]
return compatible_classes
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> jnp.ndarray:
assert len(lowerCamelCase_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCamelCase_ ) - x.ndim) ) , lowerCamelCase_ )
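# Shape sketch (illustrative values, not from the original file): with a per-sample
# scalar x of shape (4,) and a target shape (4, 3, 64, 64), the reshape above yields
# (4, 1, 1, 1) and the broadcast returns an array of shape (4, 3, 64, 64).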
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=0.9_99 , lowerCamelCase_=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(lowerCamelCase_ ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
_lowercase : List[Any] = []
for i in range(lowerCamelCase_ ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase_ ) / alpha_bar(lowerCamelCase_ ) , lowerCamelCase_ ) )
return jnp.array(lowerCamelCase_ , dtype=lowerCamelCase_ )
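# The loop above realizes the "squaredcos_cap_v2" (Glide / improved-DDPM cosine)
# schedule: with alpha_bar(t) = cos(((t + 0.008) / 1.008) * pi / 2) ** 2, each
# beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta).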
@flax.struct.dataclass
class _lowerCamelCase:
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
lowercase_ : jnp.ndarray
@classmethod
def UpperCamelCase ( cls, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : int = scheduler.config
if config.trained_betas is not None:
_lowercase : str = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
elif config.beta_schedule == "linear":
_lowercase : List[Any] = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Dict = (
jnp.linspace(
config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Optional[int] = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''')
_lowercase : List[str] = 1.0 - betas
_lowercase : Union[str, Any] = jnp.cumprod(lowerCamelCase, axis=0)
return cls(
alphas=lowerCamelCase, betas=lowerCamelCase, alphas_cumprod=lowerCamelCase, )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : str = state.alphas_cumprod
_lowercase : str = alphas_cumprod[timesteps] ** 0.5
_lowercase : Optional[Any] = sqrt_alpha_prod.flatten()
_lowercase : Tuple = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
_lowercase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowercase : Optional[Any] = sqrt_one_minus_alpha_prod.flatten()
_lowercase : int = broadcast_to_shape_from_left(lowerCamelCase_ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase , _lowercase : Optional[int] = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
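# Closed-form forward diffusion (DDPM notation): the returned sample is
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# i.e. a draw from q(x_t | x_0) without iterating over intermediate timesteps.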
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_lowercase , _lowercase : Tuple = get_sqrt_alpha_prod(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
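# v-prediction target (Salimans & Ho, 2022, "Progressive Distillation"):
#   v_t = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0.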
| 89 | 1 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
SCREAMING_SNAKE_CASE : str = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE : int = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
SCREAMING_SNAKE_CASE : Tuple = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
def remove_articles(lowerCamelCase_ ):
_lowercase : Any = re.compile(R'\b(a|an|the)\b' , re.UNICODE )
return re.sub(lowerCamelCase_ , ' ' , lowerCamelCase_ )
def white_space_fix(lowerCamelCase_ ):
return " ".join(text.split() )
def remove_punc(lowerCamelCase_ ):
_lowercase : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCamelCase_ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) )
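# Worked example of the normalization pipeline above (illustrative input):
#   "The Cat sat!" -> lower -> "the cat sat!" -> remove_punc -> "the cat sat"
#   -> remove_articles -> " cat sat" -> white_space_fix -> "cat sat"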
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
return int(normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : str = [any(compute_exact(lowerCamelCase_ , lowerCamelCase_ ) for ref in refs ) for pred, refs in zip(lowerCamelCase_ , lowerCamelCase_ )]
return (sum(lowerCamelCase_ ) / len(lowerCamelCase_ )) * 100
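# Illustrative call (hypothetical strings): with predictions = ["The cat."] and
# references = [["cat", "a dog"]], the first reference matches after normalization,
# so the returned exact-match score is 100.0.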
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Tuple = [rgram for rgrams in rgramslist for rgram in rgrams]
_lowercase : Any = Counter(lowerCamelCase_ )
_lowercase : Optional[Any] = Counter(lowerCamelCase_ )
_lowercase : Optional[Any] = Counter()
for sgram, scount in sgramcounter.items():
_lowercase : Optional[Any] = scount * numref
_lowercase : Union[str, Any] = Counter(lowerCamelCase_ )
_lowercase : List[Any] = Counter()
for cgram, ccount in cgramcounter.items():
_lowercase : List[str] = ccount * numref
# KEEP
_lowercase : Tuple = sgramcounter_rep & cgramcounter_rep
_lowercase : int = keepgramcounter_rep & rgramcounter
_lowercase : List[Any] = sgramcounter_rep & rgramcounter
_lowercase : str = 0
_lowercase : int = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowercase : Union[str, Any] = 1
_lowercase : Tuple = 1
if len(lowerCamelCase_ ) > 0:
_lowercase : Optional[int] = keeptmpscorea / len(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_lowercase : Optional[Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_lowercase : str = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_lowercase : Optional[int] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_lowercase : Optional[int] = sgramcounter_rep - cgramcounter_rep
_lowercase : Optional[int] = delgramcounter_rep - rgramcounter
_lowercase : Dict = sgramcounter_rep - rgramcounter
_lowercase : List[str] = 0
_lowercase : Optional[int] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowercase : int = 1
if len(lowerCamelCase_ ) > 0:
_lowercase : Tuple = deltmpscorea / len(lowerCamelCase_ )
# ADDITION
_lowercase : Any = set(lowerCamelCase_ ) - set(lowerCamelCase_ )
_lowercase : Optional[Any] = set(lowerCamelCase_ ) & set(lowerCamelCase_ )
_lowercase : int = set(lowerCamelCase_ ) - set(lowerCamelCase_ )
_lowercase : int = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowercase : Union[str, Any] = 1
_lowercase : Tuple = 1
if len(lowerCamelCase_ ) > 0:
_lowercase : Optional[int] = addtmpscore / len(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
_lowercase : Any = addtmpscore / len(lowerCamelCase_ )
_lowercase : Optional[int] = 0
if addscore_precision > 0 or addscore_recall > 0:
_lowercase : Union[str, Any] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase : List[str] = len(lowerCamelCase_ )
_lowercase : List[str] = ssent.split(' ' )
_lowercase : Optional[int] = csent.split(' ' )
_lowercase : Dict = []
_lowercase : Union[str, Any] = []
_lowercase : Optional[Any] = []
_lowercase : Union[str, Any] = []
_lowercase : Union[str, Any] = []
_lowercase : Any = []
_lowercase : Optional[Any] = []
_lowercase : Dict = []
_lowercase : Optional[Any] = []
_lowercase : int = []
for rsent in rsents:
_lowercase : Optional[int] = rsent.split(' ' )
_lowercase : List[Any] = []
_lowercase : str = []
_lowercase : Dict = []
ragramslist.append(lowerCamelCase_ )
for i in range(0 , len(lowerCamelCase_ ) - 1 ):
if i < len(lowerCamelCase_ ) - 1:
_lowercase : Optional[int] = ragrams[i] + ' ' + ragrams[i + 1]
ragrams.append(lowerCamelCase_ )
if i < len(lowerCamelCase_ ) - 2:
_lowercase : List[Any] = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
ragrams.append(lowerCamelCase_ )
if i < len(lowerCamelCase_ ) - 3:
_lowercase : List[str] = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
ragrams.append(lowerCamelCase_ )
ragramslist.append(lowerCamelCase_ )
ragramslist.append(lowerCamelCase_ )
ragramslist.append(lowerCamelCase_ )
for i in range(0 , len(lowerCamelCase_ ) - 1 ):
if i < len(lowerCamelCase_ ) - 1:
_lowercase : Optional[Any] = sagrams[i] + ' ' + sagrams[i + 1]
sagrams.append(lowerCamelCase_ )
if i < len(lowerCamelCase_ ) - 2:
_lowercase : Dict = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
sagrams.append(lowerCamelCase_ )
if i < len(lowerCamelCase_ ) - 3:
_lowercase : Tuple = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
sagrams.append(lowerCamelCase_ )
for i in range(0 , len(lowerCamelCase_ ) - 1 ):
if i < len(lowerCamelCase_ ) - 1:
_lowercase : str = cagrams[i] + ' ' + cagrams[i + 1]
cagrams.append(lowerCamelCase_ )
if i < len(lowerCamelCase_ ) - 2:
_lowercase : Optional[Any] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
cagrams.append(lowerCamelCase_ )
if i < len(lowerCamelCase_ ) - 3:
_lowercase : List[str] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
cagrams.append(lowerCamelCase_ )
((_lowercase) , (_lowercase) , (_lowercase)) : Union[str, Any] = SARIngram(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
((_lowercase) , (_lowercase) , (_lowercase)) : Union[str, Any] = SARIngram(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
((_lowercase) , (_lowercase) , (_lowercase)) : int = SARIngram(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
((_lowercase) , (_lowercase) , (_lowercase)) : Optional[int] = SARIngram(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[Any] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_lowercase : Tuple = sum([delascore, delascore, delascore, delascore] ) / 4
_lowercase : Optional[int] = sum([addascore, addascore, addascore, addascore] ) / 4
_lowercase : str = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ = True , lowerCamelCase_ = "13a" , lowerCamelCase_ = True ) -> Optional[Any]:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
_lowercase : int = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_lowercase : List[Any] = sacrebleu.metrics.bleu._get_tokenizer(lowerCamelCase_ )()(lowerCamelCase_ )
else:
_lowercase : Tuple = sacrebleu.TOKENIZERS[tokenizer]()(lowerCamelCase_ )
elif tokenizer == "moses":
_lowercase : Union[str, Any] = sacremoses.MosesTokenizer().tokenize(lowerCamelCase_ , return_str=lowerCamelCase_ , escape=lowerCamelCase_ )
elif tokenizer == "penn":
_lowercase : Dict = sacremoses.MosesTokenizer().penn_tokenize(lowerCamelCase_ , return_str=lowerCamelCase_ )
else:
_lowercase : Any = sentence
if not return_str:
_lowercase : str = normalized_sent.split()
return normalized_sent
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
if not (len(lowerCamelCase_ ) == len(lowerCamelCase_ ) == len(lowerCamelCase_ )):
raise ValueError('Sources length must match predictions and references lengths.' )
_lowercase : Optional[Any] = 0
for src, pred, refs in zip(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
sari_score += SARIsent(normalize(lowerCamelCase_ ) , normalize(lowerCamelCase_ ) , [normalize(lowerCamelCase_ ) for sent in refs] )
_lowercase : Dict = sari_score / len(lowerCamelCase_ )
return 100 * sari_score
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="exp" , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=False , ) -> List[str]:
_lowercase : Tuple = len(references[0] )
if any(len(lowerCamelCase_ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
_lowercase : int = [[refs[i] for refs in references] for i in range(lowerCamelCase_ )]
_lowercase : List[Any] = sacrebleu.corpus_bleu(
lowerCamelCase_ , lowerCamelCase_ , smooth_method=lowerCamelCase_ , smooth_value=lowerCamelCase_ , force=lowerCamelCase_ , lowercase=lowerCamelCase_ , use_effective_order=lowerCamelCase_ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCamelCase( datasets.Metric ):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence'),
'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
}), codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
], reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
], )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : Union[str, Any] = {}
result.update({'sari': compute_sari(sources=lowerCamelCase, predictions=lowerCamelCase, references=lowerCamelCase)})
result.update({'sacrebleu': compute_sacrebleu(predictions=lowerCamelCase, references=lowerCamelCase)})
result.update({'exact': compute_em(predictions=lowerCamelCase, references=lowerCamelCase)})
return result
| 89 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> float:
if not nums:
raise ValueError('List is empty' )
return sum(lowerCamelCase_ ) / len(lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
import re
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
_lowercase : List[Any] = re.compile(
R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' )
return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) )
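# Spot checks implied by the pattern above (assumed inputs, not in the original file):
#   is_sri_lankan_phone_number("0094702343221") -> True   ("0094" prefix, "70" operator code)
#   is_sri_lankan_phone_number("+94773283048")  -> True   ("+94" prefix, "77" operator code)
#   is_sri_lankan_phone_number("0731234567")    -> False  ("73" is not an accepted operator code)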
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 89 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase_( ) -> List[Any]:
_lowercase : int = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
_lowercase : Optional[Any] = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
_lowercase : Any = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
_lowercase : Optional[int] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 89 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class _lowerCamelCase( _a ):
lowercase_ : Optional[float] = field(
default=0.0, metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
lowercase_ : bool = field(default=_a, metadata={"""help""": """Whether to SortishSamler or not."""} )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
lowercase_ : bool = field(default=_a, metadata={"""help""": """whether to use adafactor"""} )
lowercase_ : Optional[float] = field(
default=_a, metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
lowercase_ : Optional[float] = field(
default=_a, metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
lowercase_ : Optional[float] = field(default=_a, metadata={"""help""": """Dropout probability. Goes into model.config."""} )
lowercase_ : Optional[float] = field(
default=_a, metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
lowercase_ : Optional[str] = field(
default="""linear""", metadata={"""help""": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''}, )
| 89 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Optional[int] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase ( self, lowerCamelCase=0) -> str:
"""simple docstring"""
_lowercase : Optional[int] = np.random.RandomState(lowerCamelCase)
_lowercase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : Tuple = pipe(**lowerCamelCase).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Any = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_dummy_inputs()
_lowercase : Any = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
_lowercase : int = self.get_dummy_inputs()
_lowercase : Union[str, Any] = 3 * [inputs.pop('prompt')]
_lowercase : Union[str, Any] = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Tuple = text_inputs['input_ids']
_lowercase : Any = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
_lowercase : List[Any] = prompt_embeds
# forward
_lowercase : Union[str, Any] = pipe(**lowerCamelCase)
_lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = self.get_dummy_inputs()
_lowercase : Any = 3 * ['this is a negative prompt']
_lowercase : str = negative_prompt
_lowercase : Optional[int] = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : str = output.images[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : str = 3 * [inputs.pop('prompt')]
_lowercase : Optional[int] = []
for p in [prompt, negative_prompt]:
_lowercase : Tuple = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Dict = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
_lowercase , _lowercase : str = embeds
# forward
_lowercase : Dict = pipe(**lowerCamelCase)
_lowercase : Tuple = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : int = ort.SessionOptions()
_lowercase : str = False
return options
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0)
_lowercase : Union[str, Any] = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : str = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'open neural network exchange'
_lowercase : List[Any] = np.random.RandomState(0)
_lowercase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = 'open neural network exchange'
_lowercase : str = np.random.RandomState(0)
_lowercase : Dict = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : List[Any] = 0
def test_callback_fn(lowerCamelCase, lowerCamelCase, lowerCamelCase) -> None:
_lowercase : List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_lowercase : Any = latents[0, -3:, -3:, -1]
_lowercase : Tuple = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_lowercase : List[Any] = latents[0, -3:, -3:, -1]
_lowercase : str = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
_lowercase : Any = False
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = 'Andromeda galaxy in a bottle'
_lowercase : str = np.random.RandomState(0)
pipe(
prompt=lowerCamelCase, num_inference_steps=5, guidance_scale=7.5, generator=lowerCamelCase, callback=lowerCamelCase, callback_steps=1, )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
assert isinstance(lowerCamelCase, lowerCamelCase)
assert pipe.safety_checker is None
_lowercase : Optional[int] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase)
_lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowercase : List[str] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
| 89 | 1 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : list[list[int]] = [[0 for _ in range(lowerCamelCase_ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_lowercase : Dict = 1
for n in range(m + 1 ):
for k in range(1 , lowerCamelCase_ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
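# Worked example, traced through the recurrence above: partition(4) == 5, matching
# the integer partitions of 4: 4, 3+1, 2+2, 2+1+1, 1+1+1+1.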
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
SCREAMING_SNAKE_CASE : Optional[Any] = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
SCREAMING_SNAKE_CASE : Union[str, Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : List[Any] = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = ["PoolFormerFeatureExtractor"]
SCREAMING_SNAKE_CASE : List[Any] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 89 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCamelCase( _a ):
lowercase_ : Dict = """"""
lowercase_ : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowercase_ : str = None # compression type in fsspec. ex: "gzip"
    lowercase_ : str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self, lowerCamelCase = "", lowerCamelCase = None, lowerCamelCase = None, **lowerCamelCase) -> List[str]:
"""simple docstring"""
super().__init__(self, **lowerCamelCase)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
_lowercase : str = fsspec.open(
lowerCamelCase, mode='rb', protocol=lowerCamelCase, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {}), # To avoid issues if it was already passed.
}, **(target_options or {}), )
_lowercase : Dict = os.path.basename(self.file.path.split('::')[0])
_lowercase : int = (
self.compressed_name[: self.compressed_name.rindex('.')]
if '.' in self.compressed_name
else self.compressed_name
)
_lowercase : int = None
@classmethod
def UpperCamelCase ( cls, lowerCamelCase) -> Any:
"""simple docstring"""
return super()._strip_protocol(lowerCamelCase).lstrip('/')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
if self.dir_cache is None:
_lowercase : List[str] = {**self.file.fs.info(self.file.path), 'name': self.uncompressed_name}
_lowercase : Optional[int] = {f['name']: f}
def UpperCamelCase ( self, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
return self.file.open().read()
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = "rb", lowerCamelCase=None, lowerCamelCase=True, lowerCamelCase=None, **lowerCamelCase, ) -> str:
"""simple docstring"""
_lowercase : Optional[int] = self._strip_protocol(lowerCamelCase)
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
return self.file.open()
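# Usage sketch (hypothetical URL): once registered with fsspec, these filesystems
# chain with any transport, e.g.
#   fsspec.open("gzip://file.txt::https://foo.bar/file.txt.gz")
# exposes the decompressed contents of the remote .gz file as a single-file filesystem.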
class _lowerCamelCase( _a ):
lowercase_ : Tuple = """bz2"""
lowercase_ : Dict = """bz2"""
lowercase_ : List[Any] = """.bz2"""
class _lowerCamelCase( _a ):
lowercase_ : Optional[Any] = """gzip"""
lowercase_ : int = """gzip"""
lowercase_ : Optional[Any] = """.gz"""
class _lowerCamelCase( _a ):
lowercase_ : Any = """lz4"""
lowercase_ : List[str] = """lz4"""
lowercase_ : str = """.lz4"""
class _lowerCamelCase( _a ):
lowercase_ : List[Any] = """xz"""
lowercase_ : str = """xz"""
lowercase_ : int = """.xz"""
class _lowerCamelCase( _a ):
lowercase_ : List[str] = """zstd"""
lowercase_ : List[Any] = """zstd"""
lowercase_ : Tuple = """.zst"""
def __init__( self, lowerCamelCase, lowerCamelCase = "rb", lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = DEFAULT_BLOCK_SIZE, **lowerCamelCase, ) -> Dict:
"""simple docstring"""
super().__init__(
fo=lowerCamelCase, mode=lowerCamelCase, target_protocol=lowerCamelCase, target_options=lowerCamelCase, block_size=lowerCamelCase, **lowerCamelCase, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_lowercase : List[Any] = self.file.__enter__
class _lowerCamelCase:
def __init__( self, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Tuple = file_
def __enter__( self) -> Any:
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
self._file.__exit__(*lowerCamelCase, **lowerCamelCase)
def __iter__( self) -> Optional[int]:
"""simple docstring"""
return iter(self._file)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return next(self._file)
def __getattr__( self, lowerCamelCase) -> Tuple:
"""simple docstring"""
return getattr(self._file, lowerCamelCase)
def fixed_enter(*lowerCamelCase, **lowerCamelCase):
return WrappedFile(_enter(*lowerCamelCase, **lowerCamelCase))
_lowercase : Optional[int] = fixed_enter
| 89 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
SCREAMING_SNAKE_CASE : int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
SCREAMING_SNAKE_CASE : Dict = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print("\n".join(upper_files) + "\n")
SCREAMING_SNAKE_CASE : List[Any] = [file for file in filepaths if " " in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print("\n".join(space_files) + "\n")
SCREAMING_SNAKE_CASE : Any = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print("\n".join(hyphen_files) + "\n")
SCREAMING_SNAKE_CASE : str = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print("\n".join(nodir_files) + "\n")
SCREAMING_SNAKE_CASE : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 89 | 1 |
def UpperCamelCase_( lowerCamelCase_ ) -> tuple[int, int]:
try:
_lowercase : int = float(lowerCamelCase_ )
except ValueError:
raise ValueError('Please enter a valid number' )
_lowercase : Optional[int] = decimal - int(lowerCamelCase_ )
if fractional_part == 0:
return int(lowerCamelCase_ ), 1
else:
_lowercase : Dict = len(str(lowerCamelCase_ ).split('.' )[1] )
_lowercase : Any = int(decimal * (10**number_of_frac_digits) )
_lowercase : Tuple = 10**number_of_frac_digits
_lowercase , _lowercase : Tuple = denominator, numerator
while True:
_lowercase : List[Any] = dividend % divisor
if remainder == 0:
break
_lowercase , _lowercase : Optional[int] = divisor, remainder
_lowercase , _lowercase : Optional[Any] = numerator / divisor, denominator / divisor
return int(lowerCamelCase_ ), int(lowerCamelCase_ )
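# Worked example: decimal_to_fraction(0.25) -> the fractional part "25" has two digits,
# so the raw fraction is 25/100; the while loop is Euclid's gcd (gcd(100, 25) == 25),
# and the reduced result is (1, 4).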
if __name__ == "__main__":
print(F"{decimal_to_fraction(2) = }")
print(F"{decimal_to_fraction(89.0) = }")
print(F"{decimal_to_fraction('67') = }")
print(F"{decimal_to_fraction('45.0') = }")
print(F"{decimal_to_fraction(1.5) = }")
print(F"{decimal_to_fraction('6.25') = }")
print(F"{decimal_to_fraction('78td') = }")
| 89 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Any:
_lowercase : str = 10
_lowercase : List[str] = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_lowercase : Union[str, Any] = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(lowerCamelCase_ ) ),
} , features=lowerCamelCase_ , )
return dataset
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : int = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE : str = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'file.txt'
_lowercase : List[str] = FILE_CONTENT
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
import bza
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with bza.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_lowercase : Optional[int] = bytes(lowerCamelCase_ , 'utf-8' )
with gzip.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with lza.frame.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowerCamelCase_ , 'w' ) as archive:
archive.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
import tarfile
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
import lzma
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_lowercase : int = bytes(lowerCamelCase_ , 'utf-8' )
with lzma.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
import zipfile
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_lowercase : Dict = bytes(lowerCamelCase_ , 'utf-8' )
with zstd.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
_lowercase : Optional[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
SCREAMING_SNAKE_CASE : Optional[Any] = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
SCREAMING_SNAKE_CASE : Tuple = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
SCREAMING_SNAKE_CASE : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
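# Toy payloads for the serialization fixtures below: DATA and DATA_DICT_OF_LISTS
# hold the same four rows in row- and column-oriented form, DATA_312 stores two
# rows with a shuffled key order, and DATA_STR prefixes col_1 with 's'.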
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[int] = datasets.Dataset.from_dict(lowerCamelCase_ )
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(lowerCamelCase_ ) ) as con:
_lowercase : Union[str, Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
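# A minimal sketch (illustration only) of reading the sqlite fixture back to
# verify its contents; `_read_sqlite_rows` is a hypothetical helper, not used
# by the fixtures themselves.
def _read_sqlite_rows(db_path):
    import sqlite3
    with contextlib.closing(sqlite3.connect(db_path)) as con:
        return con.execute('SELECT col_1, col_2, col_3 FROM dataset').fetchall()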
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : Tuple = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
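# A minimal sketch (illustration only): reading the CSV fixture back with the
# stdlib to show the expected row layout; `_read_csv_rows` is a hypothetical
# helper. Note that csv.DictReader yields every field as a string.
def _read_csv_rows(csv_path):
    with open(csv_path, newline='') as f:
        return list(csv.DictReader(f))  # e.g. [{'col_1': '0', 'col_2': '0', 'col_3': '0.0'}, ...]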
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : str = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
    import bz2
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCamelCase_ , 'rb' ) as f:
_lowercase : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
_lowercase : Optional[Any] = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(lowerCamelCase_ , 'wb' ) as f:
_lowercase : List[str] = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_ )
_lowercase : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase_ ) )] for k in DATA[0]} , schema=lowerCamelCase_ )
writer.write_table(lowerCamelCase_ )
writer.close()
return path
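# A minimal sketch (illustration only): the parquet fixture round-trips through
# pyarrow; `_read_parquet_table` is a hypothetical helper.
def _read_parquet_table(parquet_path):
    return pq.read_table(parquet_path)  # pa.Table with columns col_1, col_2, col_3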
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : List[Any] = {'data': DATA}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : Optional[Any] = {'data': DATA_DICT_OF_LISTS}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
import gzip
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Optional[int] = ['0', '1', '2', '3']
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : str = ['0', '1', '2', '3']
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : List[Any] = ['0', '1', '2', '3']
_lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
_lowercase : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> int:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
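# The dot-prefixed file and directory above let tests assert that data loaders
# ignore hidden entries, leaving only subdir/train.txt and subdir/test.txt to
# be discovered.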
| 89 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowerCamelCase( _a, unittest.TestCase ):
    lowercase_ : Any = ShapEImg2ImgPipeline
lowercase_ : Optional[int] = ["""image"""]
lowercase_ : Optional[Any] = ["""image"""]
lowercase_ : Optional[Any] = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
lowercase_ : Optional[int] = False
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return 8
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Optional[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
_lowercase : str = CLIPVisionModel(lowerCamelCase)
return model
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = CLIPImageProcessor(
crop_size=2_24, do_center_crop=lowerCamelCase, do_normalize=lowerCamelCase, do_resize=lowerCamelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=2_24, )
return image_processor
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Tuple = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_lowercase : Union[str, Any] = PriorTransformer(**lowerCamelCase)
return model
@property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : str = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
_lowercase : Tuple = ShapERenderer(**lowerCamelCase)
return model
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.dummy_prior
_lowercase : Optional[Any] = self.dummy_image_encoder
_lowercase : List[str] = self.dummy_image_processor
_lowercase : Union[str, Any] = self.dummy_renderer
_lowercase : Optional[int] = HeunDiscreteScheduler(
beta_schedule='exp', num_train_timesteps=10_24, prediction_type='sample', use_karras_sigmas=lowerCamelCase, clip_sample=lowerCamelCase, clip_sample_range=1.0, )
_lowercase : Union[str, Any] = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
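        # These keys mirror the pipeline's constructor arguments, so the dict
        # can be splatted directly as self.pipeline_class(**components).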
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Dict:
"""simple docstring"""
_lowercase : int = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
if str(lowerCamelCase).startswith('mps'):
_lowercase : List[Any] = torch.manual_seed(lowerCamelCase)
else:
_lowercase : List[str] = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : List[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = 'cpu'
_lowercase : List[str] = self.get_dummy_components()
_lowercase : Optional[Any] = self.pipeline_class(**lowerCamelCase)
_lowercase : List[Any] = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = pipe(**self.get_dummy_inputs(lowerCamelCase))
_lowercase : str = output.images[0]
_lowercase : Any = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
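        # The pipeline returns a stack of rendered views of the 3D object:
        # 20 frames, each frame_size x frame_size RGB.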
_lowercase : Tuple = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Tuple = torch_device == 'cpu'
_lowercase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=lowerCamelCase, relax_max_difference=lowerCamelCase, )
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = self.get_dummy_components()
_lowercase : List[Any] = self.pipeline_class(**lowerCamelCase)
_lowercase : Tuple = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 1
_lowercase : Union[str, Any] = 2
_lowercase : List[str] = self.get_dummy_inputs(lowerCamelCase)
for key in inputs.keys():
if key in self.batch_params:
_lowercase : List[Any] = batch_size * [inputs[key]]
_lowercase : Dict = pipe(**lowerCamelCase, num_images_per_prompt=lowerCamelCase)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png')
_lowercase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy')
        _lowercase : Any = ShapEImg2ImgPipeline.from_pretrained('openai/shap-e-img2img')
_lowercase : Optional[int] = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = torch.Generator(device=lowerCamelCase).manual_seed(0)
_lowercase : int = pipe(
lowerCamelCase, generator=lowerCamelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase)
| 89 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
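# Replacing the module in sys.modules with a _LazyModule defers the heavy
# torch-dependent imports until an attribute is first accessed.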
| 89 | 1 |
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE : str = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
SCREAMING_SNAKE_CASE : Dict = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
SCREAMING_SNAKE_CASE : Union[str, Any] = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCamelCase( datasets.Metric ):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence'),
'references': datasets.Value('string', id='sequence'),
}), reference_urls=[], )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, ) -> Any:
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
_lowercase : List[str] = np.array([re.sub(lowerCamelCase, '', lowerCamelCase) for x in predictions])
_lowercase : Optional[Any] = np.array([re.sub(lowerCamelCase, '', lowerCamelCase) for x in references])
else:
_lowercase : Dict = np.asarray(lowerCamelCase)
_lowercase : Dict = np.asarray(lowerCamelCase)
if ignore_case:
_lowercase : Union[str, Any] = np.char.lower(lowerCamelCase)
_lowercase : List[str] = np.char.lower(lowerCamelCase)
if ignore_punctuation:
_lowercase : Optional[Any] = string.punctuation.maketrans('', '', string.punctuation)
_lowercase : Any = np.char.translate(lowerCamelCase, table=lowerCamelCase)
_lowercase : Optional[int] = np.char.translate(lowerCamelCase, table=lowerCamelCase)
if ignore_numbers:
_lowercase : List[str] = string.digits.maketrans('', '', string.digits)
_lowercase : int = np.char.translate(lowerCamelCase, table=lowerCamelCase)
_lowercase : List[str] = np.char.translate(lowerCamelCase, table=lowerCamelCase)
_lowercase : str = predictions == references
return {"exact_match": np.mean(lowerCamelCase) * 1_00}
| 89 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=2, lowerCamelCase=99, lowerCamelCase=0, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase="last", lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=0, ) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : List[str] = seq_length
_lowercase : int = is_training
_lowercase : List[str] = use_input_lengths
_lowercase : int = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : Union[str, Any] = gelu_activation
_lowercase : List[str] = sinusoidal_embeddings
_lowercase : str = causal
_lowercase : Optional[int] = asm
_lowercase : Union[str, Any] = n_langs
_lowercase : List[Any] = vocab_size
_lowercase : Any = n_special
_lowercase : Any = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : List[str] = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : int = num_labels
_lowercase : Optional[int] = num_choices
_lowercase : Optional[Any] = summary_type
_lowercase : Optional[Any] = use_proj
_lowercase : int = scope
_lowercase : List[Any] = bos_token_id
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowercase : int = None
if self.use_input_lengths:
_lowercase : Dict = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
_lowercase : Tuple = None
_lowercase : int = None
_lowercase : int = None
if self.use_labels:
_lowercase : str = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase : Dict = ids_tensor([self.batch_size], 2).float()
_lowercase : Tuple = ids_tensor([self.batch_size], self.num_choices)
_lowercase : Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = XLMModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : str = model(lowerCamelCase, lengths=lowerCamelCase, langs=lowerCamelCase)
_lowercase : int = model(lowerCamelCase, langs=lowerCamelCase)
_lowercase : Any = model(lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[Any]:
"""simple docstring"""
_lowercase : Dict = XLMWithLMHeadModel(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> str:
"""simple docstring"""
_lowercase : Tuple = XLMForQuestionAnsweringSimple(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(lowerCamelCase)
_lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
_lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = XLMForQuestionAnswering(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase)
_lowercase : List[Any] = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, p_mask=lowerCamelCase, )
_lowercase : List[str] = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, )
        (total_loss,) = result_with_labels.to_tuple()
_lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, ())
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = XLMForSequenceClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
_lowercase : Optional[int] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.num_labels
_lowercase : str = XLMForTokenClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Dict:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_choices
_lowercase : Optional[int] = XLMForMultipleChoice(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : int = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Optional[Any] = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : List[str] = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, _a, unittest.TestCase ):
lowercase_ : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ : Optional[int] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase_ : Union[str, Any] = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=False) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_lowercase : Any = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase)
_lowercase : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase)
return inputs_dict
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Union[str, Any] = XLMModelTester(self)
_lowercase : List[str] = ConfigTester(self, config_class=lowerCamelCase, emb_dim=37)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> int:
"""simple docstring"""
self.assertIsInstance(lowerCamelCase, lowerCamelCase)
self.assertListEqual(
[isinstance(lowerCamelCase, lowerCamelCase) for iter_attentions in attentions], [True] * len(lowerCamelCase))
self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(lowerCamelCase):
# adds PAD dummy token
_lowercase : Dict = min_length + idx + 1
_lowercase : int = min_length + idx + 1
_lowercase : Dict = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(lowerCamelCase))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> List[Any]:
"""simple docstring"""
self.assertIsInstance(lowerCamelCase, lowerCamelCase)
self.assertListEqual(
[isinstance(lowerCamelCase, lowerCamelCase) for iter_hidden_states in hidden_states], [True] * len(lowerCamelCase), )
self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(lowerCamelCase):
# adds PAD dummy token
_lowercase : int = min_length + idx + 1
_lowercase : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(lowerCamelCase), )
pass
@slow
def UpperCamelCase ( self) -> int:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Dict = XLMModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor([[14, 4_47]], dtype=torch.long, device=lowerCamelCase) # the president
_lowercase : Any = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_lowercase : str = model.generate(lowerCamelCase, do_sample=lowerCamelCase)
self.assertListEqual(output_ids[0].cpu().numpy().tolist(), lowerCamelCase)
| 89 | 1 |
import argparse
import json
from tqdm import tqdm
def UpperCamelCase_( ) -> str:
_lowercase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=lowerCamelCase_ , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=lowerCamelCase_ , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=lowerCamelCase_ , help='where to store parsed gold_data_path file' , )
_lowercase : str = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
_lowercase : int = json.load(lowerCamelCase_ )
for dpr_record in tqdm(lowerCamelCase_ ):
_lowercase : Optional[Any] = dpr_record['question']
_lowercase : int = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(lowerCamelCase_ ) + '\n' )
if __name__ == "__main__":
main()
| 89 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
@dataclass
class _lowerCamelCase:
lowercase_ : Optional[str] = field(
default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
lowercase_ : Optional[str] = field(
default="""tab_fact""", metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}, )
lowercase_ : int = field(
default=10_24, metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
lowercase_ : bool = field(
default=_a, metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """A csv or a json file containing the training data."""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """A csv or a json file containing the validation data."""} )
lowercase_ : Optional[str] = field(default=_a, metadata={"""help""": """A csv or a json file containing the test data."""} )
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
_lowercase : int = self.train_file.split('.')[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_lowercase : Tuple = self.validation_file.split('.')[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _lowerCamelCase:
lowercase_ : str = field(
default=_a, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase_ : Optional[str] = field(
default=_a, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
lowercase_ : str = field(
default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
lowercase_ : bool = field(
default=_a, metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
}, )
def UpperCamelCase_( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowercase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
datasets.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_lowercase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowercase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowercase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowercase : Optional[Any] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_lowercase : Tuple = data_args.train_file.split('.' )[-1]
_lowercase : int = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_lowercase : Any = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_lowercase : str = load_dataset('csv' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowercase : Optional[int] = load_dataset('json' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowercase : Optional[Any] = raw_datasets['train'].features['label'].names
_lowercase : Any = len(lowerCamelCase_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_lowercase : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCamelCase_ , )
_lowercase : Tuple = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowercase : int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowercase : str = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowercase : List[Any] = {'Refused': 0, 'Entailed': 1}
_lowercase : Union[str, Any] = {0: 'Refused', 1: 'Entailed'}
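    # TabFact's binary labels: these dicts serve as the label2id / id2label maps.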
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_lowercase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCamelCase_ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCamelCase_ ):
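            # `_table_text` encodes rows separated by newlines and cells separated by '#';
            # the first row holds the column headers used to build the DataFrame.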
_lowercase : int = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_lowercase : Any = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_lowercase : List[Any] = examples['statement']
_lowercase : Optional[Any] = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_lowercase : Union[str, Any] = tokenizer(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ )
_lowercase : Any = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
_lowercase : str = raw_datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowercase : Any = raw_datasets['train']
if data_args.max_train_samples is not None:
_lowercase : str = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowercase : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowercase : List[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_lowercase : Optional[int] = raw_datasets['test']
if data_args.max_predict_samples is not None:
_lowercase : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCamelCase_ ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(lowerCamelCase_ ):
_lowercase : Dict = p.predictions[0] if isinstance(p.predictions , lowerCamelCase_ ) else p.predictions
_lowercase : Tuple = np.argmax(lowerCamelCase_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowercase : Any = default_data_collator
elif training_args.fpaa:
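        # Padding to a multiple of 8 keeps sequence lengths friendly to fp16 tensor cores.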
_lowercase : str = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 )
else:
_lowercase : Optional[Any] = None
# Initialize our Trainer
_lowercase : List[str] = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase_ , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
_lowercase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
_lowercase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowercase : Optional[Any] = last_checkpoint
_lowercase : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
_lowercase : List[Any] = train_result.metrics
_lowercase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
_lowercase : int = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCamelCase_ )
trainer.save_metrics('train' , lowerCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowercase : Tuple = trainer.evaluate(eval_dataset=lowerCamelCase_ )
_lowercase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
_lowercase : Optional[int] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('eval' , lowerCamelCase_ )
trainer.save_metrics('eval' , lowerCamelCase_ )
if training_args.do_predict:
logger.info('*** Predict ***' )
        # Removing the `label` column because it contains -1, which the Trainer won't accept.
_lowercase : Any = predict_dataset.remove_columns('label' )
_lowercase : Optional[Any] = trainer.predict(lowerCamelCase_ , metric_key_prefix='predict' ).predictions
_lowercase : Union[str, Any] = np.argmax(lowerCamelCase_ , axis=1 )
_lowercase : Dict = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(lowerCamelCase_ ):
_lowercase : List[str] = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
_lowercase : str = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 89 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class _lowerCamelCase( _a ):
lowercase_ : Union[str, Any] = """speech_to_text"""
lowercase_ : List[Any] = ["""past_key_values"""]
lowercase_ : Optional[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self, lowerCamelCase=1_00_00, lowerCamelCase=12, lowerCamelCase=20_48, lowerCamelCase=4, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=4, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=2, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=0, lowerCamelCase=2, lowerCamelCase=60_00, lowerCamelCase=10_24, lowerCamelCase=2, lowerCamelCase=(5, 5), lowerCamelCase=10_24, lowerCamelCase=80, lowerCamelCase=1, **lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = vocab_size
_lowercase : Optional[int] = d_model
_lowercase : Any = encoder_ffn_dim
_lowercase : Tuple = encoder_layers
_lowercase : Tuple = encoder_attention_heads
_lowercase : Optional[Any] = decoder_ffn_dim
_lowercase : Optional[int] = decoder_layers
_lowercase : str = decoder_attention_heads
_lowercase : Union[str, Any] = dropout
_lowercase : str = attention_dropout
_lowercase : List[str] = activation_dropout
_lowercase : Optional[Any] = activation_function
_lowercase : int = init_std
_lowercase : str = encoder_layerdrop
_lowercase : List[str] = decoder_layerdrop
_lowercase : List[str] = use_cache
_lowercase : Tuple = encoder_layers
_lowercase : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
_lowercase : str = max_source_positions
_lowercase : List[str] = max_target_positions
_lowercase : Dict = num_conv_layers
_lowercase : Tuple = list(lowerCamelCase)
_lowercase : str = conv_channels
_lowercase : int = input_feat_per_channel
_lowercase : str = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''')
super().__init__(
pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, eos_token_id=lowerCamelCase, is_encoder_decoder=lowerCamelCase, decoder_start_token_id=lowerCamelCase, **lowerCamelCase, )
| 89 |
from maths.prime_factors import prime_factors
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : str = F'''Input value of [number={number}] must be an integer'''
raise TypeError(lowerCamelCase_ )
if number < 1:
raise ValueError('Input must be a positive integer' )
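    # This is the Liouville function: -1 when the count of prime factors
    # (with multiplicity) is odd, +1 when it is even.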
return -1 if len(prime_factors(lowerCamelCase_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Any = KandinskyVaaControlnetImgaImgPipeline
lowercase_ : Tuple = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
lowercase_ : Optional[int] = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
lowercase_ : str = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowercase_ : List[str] = False
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return 1_00
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Tuple = {
'in_channels': 8,
            # out_channels is double in_channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowercase : Tuple = UNetaDConditionModel(**lowerCamelCase)
return model
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : List[Any] = VQModel(**self.dummy_movq_kwargs)
return model
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = self.dummy_unet
_lowercase : List[Any] = self.dummy_movq
_lowercase : Union[str, Any] = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_lowercase : Any = DDIMScheduler(**lowerCamelCase)
_lowercase : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Optional[Any]:
"""simple docstring"""
_lowercase : Dict = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : Any = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
lowerCamelCase)
# create init_image
_lowercase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : str = image.cpu().permute(0, 2, 3, 1)[0]
_lowercase : Optional[int] = Image.fromarray(np.uinta(lowerCamelCase)).convert('RGB').resize((2_56, 2_56))
# create hint
_lowercase : int = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
if str(lowerCamelCase).startswith('mps'):
_lowercase : str = torch.manual_seed(lowerCamelCase)
else:
_lowercase : Any = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Any = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Dict = 'cpu'
_lowercase : Optional[Any] = self.get_dummy_components()
_lowercase : Any = self.pipeline_class(**lowerCamelCase)
_lowercase : str = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = pipe(**self.get_dummy_inputs(lowerCamelCase))
_lowercase : List[str] = output.images
_lowercase : List[Any] = pipe(
**self.get_dummy_inputs(lowerCamelCase), return_dict=lowerCamelCase, )[0]
_lowercase : Tuple = image[0, -3:, -3:, -1]
_lowercase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowercase : int = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy')
_lowercase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
_lowercase : Dict = init_image.resize((5_12, 5_12))
_lowercase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png')
_lowercase : List[str] = torch.from_numpy(np.array(lowerCamelCase)).float() / 2_5_5.0
_lowercase : Union[str, Any] = hint.permute(2, 0, 1).unsqueeze(0)
_lowercase : int = 'A robot, 4k photo'
_lowercase : List[Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa)
pipe_prior.to(lowerCamelCase)
_lowercase : Dict = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa)
_lowercase : Union[str, Any] = pipeline.to(lowerCamelCase)
pipeline.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = torch.Generator(device='cpu').manual_seed(0)
_lowercase , _lowercase : str = pipe_prior(
lowerCamelCase, image=lowerCamelCase, strength=0.8_5, generator=lowerCamelCase, negative_prompt='', ).to_tuple()
_lowercase : Union[str, Any] = pipeline(
image=lowerCamelCase, image_embeds=lowerCamelCase, negative_image_embeds=lowerCamelCase, hint=lowerCamelCase, generator=lowerCamelCase, num_inference_steps=1_00, height=5_12, width=5_12, strength=0.5, output_type='np', )
_lowercase : Optional[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase)
| 89 |
from __future__ import annotations
from typing import Any
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 0) -> None:
"""simple docstring"""
_lowercase , _lowercase : str = row, column
_lowercase : Any = [[default_value for c in range(lowerCamelCase)] for r in range(lowerCamelCase)]
def __str__( self) -> str:
"""simple docstring"""
_lowercase : Tuple = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
_lowercase : str = 0
for row_vector in self.array:
for obj in row_vector:
_lowercase : Optional[int] = max(lowerCamelCase, len(str(lowerCamelCase)))
_lowercase : List[str] = F'''%{max_element_length}s'''
# Make string and return
def single_line(lowerCamelCase) -> str:
nonlocal string_format_identifier
_lowercase : Union[str, Any] = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(lowerCamelCase) for row_vector in self.array)
return s
def __repr__( self) -> str:
"""simple docstring"""
return str(self)
def UpperCamelCase ( self, lowerCamelCase) -> bool:
"""simple docstring"""
if not (isinstance(lowerCamelCase, (list, tuple)) and len(lowerCamelCase) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self, lowerCamelCase) -> Any:
"""simple docstring"""
assert self.validate_indicies(lowerCamelCase)
return self.array[loc[0]][loc[1]]
def __setitem__( self, lowerCamelCase, lowerCamelCase) -> None:
"""simple docstring"""
assert self.validate_indicies(lowerCamelCase)
_lowercase : Optional[Any] = value
def __add__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
assert isinstance(lowerCamelCase, lowerCamelCase)
assert self.row == another.row and self.column == another.column
# Add
_lowercase : Any = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : int = self[r, c] + another[r, c]
return result
def __neg__( self) -> Matrix:
"""simple docstring"""
_lowercase : List[Any] = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : List[str] = -self[r, c]
return result
def __sub__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self, lowerCamelCase) -> Matrix:
"""simple docstring"""
if isinstance(lowerCamelCase, (int, float)): # Scalar multiplication
_lowercase : Dict = Matrix(self.row, self.column)
for r in range(self.row):
for c in range(self.column):
_lowercase : Union[str, Any] = self[r, c] * another
return result
elif isinstance(lowerCamelCase, lowerCamelCase): # Matrix multiplication
assert self.column == another.row
_lowercase : str = Matrix(self.row, another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_lowercase : Tuple = F'''Unsupported type given for another ({type(lowerCamelCase)})'''
raise TypeError(lowerCamelCase)
def UpperCamelCase ( self) -> Matrix:
"""simple docstring"""
_lowercase : List[Any] = Matrix(self.column, self.row)
for r in range(self.row):
for c in range(self.column):
_lowercase : Union[str, Any] = self[r, c]
return result
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
assert isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
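        # Sherman-Morrison: assuming `self` holds A^(-1), the inverse of (A + u v^T) is
        #   A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u),
        # which is what the expression below computes; a zero denominator means
        # (A + u v^T) is singular.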
# Calculate
_lowercase : Dict = v.transpose()
_lowercase : Any = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def UpperCamelCase_( ) -> None:
# a^(-1)
_lowercase : Optional[int] = Matrix(3 , 3 , 0 )
for i in range(3 ):
_lowercase : int = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
_lowercase : Dict = Matrix(3 , 1 , 0 )
_lowercase , _lowercase , _lowercase : Dict = 1, 2, -3
_lowercase : List[Any] = Matrix(3 , 1 , 0 )
_lowercase , _lowercase , _lowercase : int = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase_ , lowerCamelCase_ )}''' )
def UpperCamelCase_( ) -> None:
import doctest
doctest.testmod()
testa()
| 89 | 1 |
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
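    # Min-max selection sort: each pass removes the current minimum and maximum;
    # minima accumulate in `start`, maxima in `end` (reversed before concatenation).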
_lowercase , _lowercase : Optional[int] = [], []
while len(lowerCamelCase_ ) > 1:
_lowercase , _lowercase : List[str] = min(lowerCamelCase_ ), max(lowerCamelCase_ )
start.append(lowerCamelCase_ )
end.append(lowerCamelCase_ )
collection.remove(lowerCamelCase_ )
collection.remove(lowerCamelCase_ )
end.reverse()
return start + collection + end
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = input("Enter numbers separated by a comma:\n").strip()
SCREAMING_SNAKE_CASE : Union[str, Any] = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 89 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCamelCase_( lowerCamelCase_ ) -> int:
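    # Render a duration given in seconds as h:mm:ss, or mm:ss when under an hour.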
_lowercase : List[str] = int(lowerCamelCase_ )
_lowercase , _lowercase , _lowercase : Optional[Any] = t // 3600, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=300 ) -> Dict:
# docstyle-ignore
return F'''
<div>
{prefix}
    <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
'''
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : int = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_lowercase : Any = F'''{elt:.6f}''' if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else str(lowerCamelCase_ )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _lowerCamelCase:
lowercase_ : str = 5
lowercase_ : str = 0.2
def __init__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = 3_00, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = total
_lowercase : Optional[int] = '' if prefix is None else prefix
_lowercase : Tuple = leave
_lowercase : str = parent
_lowercase : str = width
_lowercase : List[Any] = None
_lowercase : List[str] = None
_lowercase : Tuple = None
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, lowerCamelCase = None) -> Dict:
"""simple docstring"""
_lowercase : Any = value
if comment is not None:
_lowercase : Union[str, Any] = comment
if self.last_value is None:
_lowercase : Dict = time.time()
_lowercase : Tuple = value
_lowercase : str = None
_lowercase : Optional[int] = self.warmup
_lowercase : Optional[Any] = 1
self.update_bar(lowerCamelCase)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
if self.first_calls > 0:
self.first_calls -= 1
_lowercase : List[str] = time.time()
_lowercase : Tuple = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_lowercase : Dict = self.elapsed_time / (value - self.start_value)
else:
_lowercase : int = None
if value >= self.total:
_lowercase : Dict = self.total
_lowercase : List[str] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_lowercase : Optional[int] = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCamelCase)
_lowercase : int = value
_lowercase : Tuple = current_time
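            # Throttle refreshes: `wait_for` approximates how many items correspond to
            # `update_every` seconds at the current processing speed.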
if self.average_time_per_item is None:
_lowercase : str = 1
else:
_lowercase : int = max(int(self.update_every / self.average_time_per_item), 1)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[Any] = ' ' * (len(str(self.total)) - len(str(lowerCamelCase))) + str(lowerCamelCase)
if self.elapsed_time is None:
_lowercase : int = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
_lowercase : Union[str, Any] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
else:
_lowercase : Union[str, Any] = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
F''' {format_time(self.predicted_remaining)}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]'''
self.display()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_lowercase : Optional[Any] = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(''))
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase=None) -> int:
"""simple docstring"""
super().__init__(lowerCamelCase)
_lowercase : Optional[Any] = None if column_names is None else [column_names]
_lowercase : Any = None
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Any = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_lowercase : Dict = disp.display(disp.HTML(self.html_code), display_id=lowerCamelCase)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
if self.inner_table is None:
_lowercase : Dict = [list(values.keys()), list(values.values())]
else:
_lowercase : Tuple = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCamelCase)
_lowercase : str = columns
self.inner_table.append([values[c] for c in columns])
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=3_00) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[str] = NotebookProgressBar(lowerCamelCase, prefix=lowerCamelCase, parent=self, width=lowerCamelCase)
return self.child_bar
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[Any] = None
self.display()
class _lowerCamelCase( _a ):
def __init__( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = None
_lowercase : Dict = None
_lowercase : Dict = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Dict = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
_lowercase : Dict = 0
_lowercase : Tuple = 0
_lowercase : int = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss')
_lowercase : Union[str, Any] = NotebookTrainingTracker(state.max_steps, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = int(state.epoch) if int(state.epoch) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
_lowercase : str = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
"""simple docstring"""
if not has_length(lowerCamelCase):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowercase : Optional[int] = self.training_tracker.add_child(len(lowerCamelCase))
else:
_lowercase : Optional[int] = NotebookProgressBar(len(lowerCamelCase))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowercase : Any = None
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowercase : Dict = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in the epoch eval strategy
_lowercase : List[Any] = state.global_step
self.training_tracker.write_line(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> List[str]:
"""simple docstring"""
if self.training_tracker is not None:
_lowercase : Tuple = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history):
if "loss" in log:
_lowercase : int = log['loss']
break
if self.first_column == "Epoch":
_lowercase : Union[str, Any] = int(state.epoch)
else:
_lowercase : Optional[Any] = state.global_step
_lowercase : str = 'eval'
for k in metrics:
if k.endswith('_loss'):
_lowercase : str = re.sub(R'\_loss$', '', lowerCamelCase)
_lowercase : Tuple = metrics.pop('total_flos', lowerCamelCase)
_lowercase : List[str] = metrics.pop('epoch', lowerCamelCase)
_lowercase : List[Any] = metrics.pop(F'''{metric_key_prefix}_runtime''', lowerCamelCase)
_lowercase : Dict = metrics.pop(F'''{metric_key_prefix}_samples_per_second''', lowerCamelCase)
_lowercase : Tuple = metrics.pop(F'''{metric_key_prefix}_steps_per_second''', lowerCamelCase)
_lowercase : List[str] = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', lowerCamelCase)
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
_lowercase : Union[str, Any] = v
else:
_lowercase : Optional[Any] = k.split('_')
_lowercase : Optional[int] = ' '.join([part.capitalize() for part in splits[1:]])
_lowercase : Tuple = v
self.training_tracker.write_line(lowerCamelCase)
self.training_tracker.remove_child()
_lowercase : str = None
# Evaluation takes a long time so we should force the next update.
_lowercase : Optional[Any] = True
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
self.training_tracker.update(
state.global_step, comment=F'''Epoch {int(state.epoch)}/{state.num_train_epochs}''', force_update=lowerCamelCase)
_lowercase : Any = None
| 89 | 1 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
enable_full_determinism()
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : Optional[int] = UNetaDModel
lowercase_ : List[str] = """sample"""
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Tuple = 4
_lowercase : Any = 3
_lowercase : List[str] = (32, 32)
_lowercase : str = floats_tensor((batch_size, num_channels) + sizes).to(lowerCamelCase)
_lowercase : List[str] = torch.tensor([10]).to(lowerCamelCase)
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
_lowercase : int = self.dummy_input
return init_dict, inputs_dict
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : List[str] = UNetaDModel
lowercase_ : List[Any] = """sample"""
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Tuple = 4
_lowercase : int = 4
_lowercase : Tuple = (32, 32)
_lowercase : Any = floats_tensor((batch_size, num_channels) + sizes).to(lowerCamelCase)
_lowercase : Any = torch.tensor([10]).to(lowerCamelCase)
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
return (4, 32, 32)
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
return (4, 32, 32)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
_lowercase : int = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : str = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
self.assertEqual(len(loading_info['missing_keys']), 0)
model.to(lowerCamelCase)
_lowercase : List[str] = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU')
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase : Dict = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=lowerCamelCase)
model.to(lowerCamelCase)
_lowercase : Optional[Any] = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU')
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase : Tuple = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=lowerCamelCase)
model_accelerate.to(lowerCamelCase)
model_accelerate.eval()
_lowercase : Union[str, Any] = torch.randn(
1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), )
_lowercase : List[str] = noise.to(lowerCamelCase)
_lowercase : List[Any] = torch.tensor([10] * noise.shape[0]).to(lowerCamelCase)
_lowercase : str = model_accelerate(lowerCamelCase, lowerCamelCase)['sample']
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_lowercase , _lowercase : Union[str, Any] = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update', output_loading_info=lowerCamelCase, low_cpu_mem_usage=lowerCamelCase)
model_normal_load.to(lowerCamelCase)
model_normal_load.eval()
_lowercase : Dict = model_normal_load(lowerCamelCase, lowerCamelCase)['sample']
assert torch_all_close(lowerCamelCase, lowerCamelCase, rtol=1E-3)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update')
model.eval()
model.to(lowerCamelCase)
_lowercase : List[Any] = torch.randn(
1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
_lowercase : Dict = noise.to(lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor([10] * noise.shape[0]).to(lowerCamelCase)
with torch.no_grad():
_lowercase : Dict = model(lowerCamelCase, lowerCamelCase).sample
_lowercase : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_lowercase : Dict = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0])
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase, lowerCamelCase, rtol=1E-3))
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : str = UNetaDModel
lowercase_ : Any = """sample"""
@property
def UpperCamelCase ( self, lowerCamelCase=(32, 32)) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = 4
_lowercase : int = 3
_lowercase : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes).to(lowerCamelCase)
_lowercase : List[Any] = torch.tensor(batch_size * [10]).to(dtype=torch.intaa, device=lowerCamelCase)
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Tuple = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
_lowercase : List[Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase , _lowercase : Optional[int] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256', output_loading_info=lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
self.assertEqual(len(loading_info['missing_keys']), 0)
model.to(lowerCamelCase)
_lowercase : List[str] = self.dummy_input
_lowercase : Dict = floats_tensor((4, 3) + (2_56, 2_56)).to(lowerCamelCase)
_lowercase : Optional[Any] = noise
_lowercase : List[Any] = model(**lowerCamelCase)
assert image is not None, "Make sure output is not None"
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256')
model.to(lowerCamelCase)
_lowercase : Tuple = 4
_lowercase : List[str] = 3
_lowercase : str = (2_56, 2_56)
_lowercase : int = torch.ones((batch_size, num_channels) + sizes).to(lowerCamelCase)
_lowercase : List[str] = torch.tensor(batch_size * [1E-4]).to(lowerCamelCase)
with torch.no_grad():
_lowercase : Dict = model(lowerCamelCase, lowerCamelCase).sample
_lowercase : Optional[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowercase : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8])
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase, lowerCamelCase, rtol=1E-2))
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update')
model.to(lowerCamelCase)
_lowercase : Optional[int] = 4
_lowercase : Optional[Any] = 3
_lowercase : Optional[int] = (32, 32)
_lowercase : str = torch.ones((batch_size, num_channels) + sizes).to(lowerCamelCase)
_lowercase : Tuple = torch.tensor(batch_size * [1E-4]).to(lowerCamelCase)
with torch.no_grad():
_lowercase : Optional[int] = model(lowerCamelCase, lowerCamelCase).sample
_lowercase : Optional[int] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowercase : Tuple = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6])
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase, lowerCamelCase, rtol=1E-2))
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
| 89 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
_lowercase : Tuple = True if 'large' in model_name or 'huge' in model_name else False
_lowercase : Any = True if 'large' in model_name or 'huge' in model_name else False
_lowercase : Dict = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_lowercase : Any = [3, 3, 3, 3]
_lowercase : Any = [5, 5, 5, 5]
elif "fl4" in model_name:
_lowercase : Dict = [4, 4, 4, 4]
_lowercase : Tuple = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_lowercase : str = [3, 3, 3, 3]
if "lrf" in model_name:
_lowercase : Optional[int] = [3, 3, 3, 3]
else:
_lowercase : Dict = [2, 2, 2, 2]
if "tiny" in model_name:
_lowercase : List[str] = 96
elif "small" in model_name:
_lowercase : Dict = 96
elif "base" in model_name:
_lowercase : Optional[int] = 128
elif "large" in model_name:
_lowercase : List[Any] = 192
elif "xlarge" in model_name:
_lowercase : Optional[Any] = 256
elif "huge" in model_name:
_lowercase : Dict = 352
# set label information
_lowercase : int = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
_lowercase : str = 'imagenet-22k-id2label.json'
else:
_lowercase : Tuple = 'imagenet-1k-id2label.json'
_lowercase : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : Any = {v: k for k, v in idalabel.items()}
_lowercase : Optional[Any] = FocalNetConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , focal_levels=lowerCamelCase_ , focal_windows=lowerCamelCase_ , use_conv_embed=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , use_post_layernorm=lowerCamelCase_ , use_layerscale=lowerCamelCase_ , )
return config
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
if "patch_embed.proj" in name:
_lowercase : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : str = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
_lowercase : Any = 'encoder.' + name
if "encoder.layers" in name:
_lowercase : int = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
_lowercase : Tuple = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
_lowercase : str = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_lowercase : List[str] = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_lowercase : int = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_lowercase : Any = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
_lowercase : Any = 'layernorm.weight'
if name == "norm.bias":
_lowercase : Tuple = 'layernorm.bias'
if "head" in name:
_lowercase : Optional[int] = name.replace('head' , 'classifier' )
else:
_lowercase : Optional[int] = 'focalnet.' + name
return name
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> str:
# fmt: off
_lowercase : Dict = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
_lowercase : Dict = model_name_to_url[model_name]
print('Checkpoint URL: ' , lowerCamelCase_ )
_lowercase : List[str] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[int] = val
_lowercase : Union[str, Any] = get_focalnet_config(lowerCamelCase_ )
_lowercase : Optional[Any] = FocalNetForImageClassification(lowerCamelCase_ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase_ )
# verify conversion
_lowercase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : Any = BitImageProcessor(
do_resize=lowerCamelCase_ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase_ , crop_size=224 , do_normalize=lowerCamelCase_ , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ , )
_lowercase : List[str] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
_lowercase : List[Any] = processor(images=lowerCamelCase_ , return_tensors='pt' )
_lowercase : str = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
_lowercase : List[str] = image_transforms(lowerCamelCase_ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase_ , atol=1e-4 )
_lowercase : Dict = model(**lowerCamelCase_ )
_lowercase : int = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_lowercase : Optional[Any] = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
_lowercase : int = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
_lowercase : str = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
_lowercase : Any = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
_lowercase : List[Any] = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
_lowercase : int = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 1 |
import fire
from utils import calculate_rouge, save_json
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , **lowerCamelCase_ ) -> Optional[int]:
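    # Read both files line by line; the second list is truncated to the length of
    # the first so the two line counts match before ROUGE is computed.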
_lowercase : int = [x.strip() for x in open(lowerCamelCase_ ).readlines()]
_lowercase : List[Any] = [x.strip() for x in open(lowerCamelCase_ ).readlines()][: len(lowerCamelCase_ )]
_lowercase : Dict = calculate_rouge(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
if save_path is not None:
save_json(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 89 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
lowercase_ : Any = """deta"""
lowercase_ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self, lowerCamelCase=None, lowerCamelCase=9_00, lowerCamelCase=20_48, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=True, lowerCamelCase=3_00, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, **lowerCamelCase, ) -> Any:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
_lowercase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
else:
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Dict = backbone_config.pop('model_type')
_lowercase : int = CONFIG_MAPPING[backbone_model_type]
_lowercase : Union[str, Any] = config_class.from_dict(lowerCamelCase)
_lowercase : Union[str, Any] = backbone_config
_lowercase : Any = num_queries
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Union[str, Any] = d_model
_lowercase : Optional[int] = encoder_ffn_dim
_lowercase : Optional[int] = encoder_layers
_lowercase : Optional[Any] = encoder_attention_heads
_lowercase : Optional[Any] = decoder_ffn_dim
_lowercase : Dict = decoder_layers
_lowercase : Tuple = decoder_attention_heads
_lowercase : Union[str, Any] = dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : int = activation_dropout
_lowercase : Tuple = activation_function
_lowercase : List[Any] = init_std
_lowercase : Union[str, Any] = init_xavier_std
_lowercase : int = encoder_layerdrop
_lowercase : Optional[int] = auxiliary_loss
_lowercase : Dict = position_embedding_type
# deformable attributes
_lowercase : Any = num_feature_levels
_lowercase : str = encoder_n_points
_lowercase : Any = decoder_n_points
_lowercase : List[str] = two_stage
_lowercase : Dict = two_stage_num_proposals
_lowercase : Any = with_box_refine
_lowercase : List[Any] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
_lowercase : List[Any] = class_cost
_lowercase : Optional[int] = bbox_cost
_lowercase : str = giou_cost
# Loss coefficients
_lowercase : Optional[int] = mask_loss_coefficient
_lowercase : int = dice_loss_coefficient
_lowercase : List[Any] = bbox_loss_coefficient
_lowercase : Optional[Any] = giou_loss_coefficient
_lowercase : str = eos_coefficient
_lowercase : int = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.d_model
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = copy.deepcopy(self.__dict__)
_lowercase : Optional[int] = self.backbone_config.to_dict()
_lowercase : Optional[Any] = self.__class__.model_type
return output
| 89 | 1 |
import requests
SCREAMING_SNAKE_CASE : str = "" # <-- Put your OpenWeatherMap appid here!
SCREAMING_SNAKE_CASE : Any = "https://api.openweathermap.org/data/2.5/"
def UpperCamelCase_( lowerCamelCase_ = "Chicago" , lowerCamelCase_ = APPID ) -> dict:
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def UpperCamelCase_( lowerCamelCase_ = "Kolkata, India" , lowerCamelCase_ = APPID ) -> dict:
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def UpperCamelCase_( lowerCamelCase_ = 55.68 , lowerCamelCase_ = 12.57 , lowerCamelCase_ = APPID ) -> dict:
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
SCREAMING_SNAKE_CASE : List[str] = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 89 |
from __future__ import annotations
import numpy as np
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
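    # ReLU: elementwise max(0, x); np.maximum broadcasts over array input.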
return np.maximum(0 , lowerCamelCase_ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 89 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : int = "▁"
SCREAMING_SNAKE_CASE : Union[str, Any] = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE : Optional[int] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
SCREAMING_SNAKE_CASE : List[Any] = {
"google/pegasus-xsum": 512,
}
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class PegasusTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self, lowerCamelCase, lowerCamelCase="<pad>", lowerCamelCase="</s>", lowerCamelCase="<unk>", lowerCamelCase="<mask_2>", lowerCamelCase="<mask_1>", lowerCamelCase=None, lowerCamelCase=1_03, lowerCamelCase = None, **lowerCamelCase, ) -> None:
"""simple docstring"""
_lowercase : str = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase, lowerCamelCase):
raise TypeError(
F'''additional_special_tokens should be of type {type(lowerCamelCase)}, but is'''
F''' {type(lowerCamelCase)}''')
_lowercase : Any = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(lowerCamelCase), self.offset - 1)
]
if len(set(lowerCamelCase)) != len(lowerCamelCase):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''')
_lowercase : Optional[int] = additional_special_tokens_extended
else:
_lowercase : str = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2, self.offset)]
_lowercase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCamelCase, unk_token=lowerCamelCase, mask_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token_sent=lowerCamelCase, offset=lowerCamelCase, additional_special_tokens=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
_lowercase : str = mask_token_sent
_lowercase : Optional[int] = vocab_file
_lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase)
# add special tokens to encoder dict
_lowercase : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
})
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
_lowercase : Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size( self) -> int:
        """simple docstring"""
        return len(self.sp_model) + self.offset
    def get_vocab( self) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self) -> Any:
        """simple docstring"""
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self, d) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize( self, text) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id( self, token) -> int:
        """simple docstring"""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset
    def _convert_id_to_token( self, index) -> str:
        """simple docstring"""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string( self, tokens) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add( self, pair=False) -> int:
        """simple docstring"""
        return 1
    def _special_token_mask( self, seq) -> List[int]:
        """simple docstring"""
        all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self, save_directory, filename_prefix = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 89 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ) -> None:
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 89 | 1 |
from datetime import datetime as dt
import os
from github import Github
SCREAMING_SNAKE_CASE : Dict = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main() -> None:
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/transformers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 89 |
def fibonacci(n ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index(n ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution(n = 1000 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
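# Cross-check sketch (added for illustration): Binet's formula gives the digit count of
# F(k) as floor(k*log10(phi) - log10(sqrt(5))) + 1, so the first index with n digits can
# be computed in closed form; for n = 1000 both approaches give 4782.
def solution_closed_form(n = 1000 ) -> int:
    import math
    phi = (1 + math.sqrt(5 )) / 2
    return math.ceil((n - 1 + math.log10(5 ) / 2) / math.log10(phi ) )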
| 89 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a , start , end ):
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition(a , start , end ):
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]: # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100 # 100 elements are to be sorted
mu, sigma = 0, 1 # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
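# Analytic cross-check (added): randomized quicksort makes 2*(n+1)*H_n - 4*n comparisons
# in expectation, about 648 for n = 100, so z should land in that neighbourhood.
def expected_comparisons(n ) -> float:
    return 2 * (n + 1) * sum(1 / k for k in range(1 , n + 1 )) - 4 * n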
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE : Optional[Any] = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
SCREAMING_SNAKE_CASE : Optional[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Any = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Any = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 89 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34 # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8 # unit of c : m * s^-1
def casimir_force(force , area , distance ) -> dict[str, float]:
    if (force, area, distance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if force < 0:
        raise ValueError('Magnitude of force can not be negative' )
    if distance < 0:
        raise ValueError('Distance can not be negative' )
    if area < 0:
        raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
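# Worked example (added): a 2 cm x 2 cm plate (area 4e-4 m^2) separated by one
# micrometre experiences roughly 5.2e-7 N of Casimir force.
if __name__ == "__main__":
    print(casimir_force(force=0 , area=4e-4 , distance=1e-6 ))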
| 89 | 1 |
def count_inversions_bf(arr ) -> int:
    num_inversions = 0
    n = len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr ):
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a , inversion_p = count_inversions_recursive(p )
    b , inversions_q = count_inversions_recursive(q )
    c , cross_inversions = _count_cross_inversions(a , b )
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p , q ):
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p)
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion
def main() -> None:
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ' , num_inversions_bf )
    # testing an array with zero inversion (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
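# Extra randomized cross-check (added): the O(n^2) counter and the divide-and-conquer
# counter must agree on arbitrary inputs, not just the hand-picked ones in main().
def _random_cross_check(trials = 100 ) -> None:
    import random
    for _ in range(trials ):
        arr = [random.randint(0 , 50 ) for _ in range(random.randint(0 , 30 ) )]
        assert count_inversions_bf(arr ) == count_inversions_recursive(arr )[1]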
if __name__ == "__main__":
main()
| 89 |
def multiplicative_persistence(num ) -> int:
    if not isinstance(num , int ):
        raise ValueError('multiplicative_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def additive_persistence(num ) -> int:
    if not isinstance(num , int ):
        raise ValueError('additive_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
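# Illustrative extra checks (added): 39 -> 27 -> 14 -> 4 has multiplicative persistence
# 3, and 199 -> 19 -> 10 -> 1 has additive persistence 3.
if __name__ == "__main__":
    assert multiplicative_persistence(39 ) == 3
    assert additive_persistence(199 ) == 3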
| 89 | 1 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked = 20 ) -> str:
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
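# Hedged Monte Carlo cross-check (added; stdlib only): drawing 20 balls at random and
# counting distinct colours should approach the exact expectation printed above (~6.8187).
def _simulate(trials = 10_000 ) -> float:
    import random
    balls = [colour for colour in range(NUM_COLOURS ) for _ in range(BALLS_PER_COLOUR )]
    return sum(len(set(random.sample(balls , 20 ) ) ) for _ in range(trials ) ) / trials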
| 89 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name ):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50' )
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101' )
    else:
        raise ValueError('Model name should include either resnet-50 or resnet-101' )
    config = DetrConfig(use_timm_backbone=False , backbone_config=backbone_config )
    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = 'huggingface/label-files'
        filename = 'coco-detection-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config ):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key(state_dict , old , new ) -> None:
    val = state_dict.pop(old )
    state_dict[new] = val
def read_in_q_k_v(state_dict , is_panoptic=False ) -> None:
    prefix = ''
    if is_panoptic:
        prefix = 'detr.'
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[F'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[F'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[F'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[F'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[F'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        in_proj_bias_cross_attn = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[F'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[F'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    config , is_panoptic = get_detr_config(model_name )
    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(F'''Converting model {model_name}...''' )
    detr = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=True ).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config ):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict , src , dest )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr' )
                and not key.startswith('class_labels_classifier' )
                and not key.startswith('bbox_predictor' )
            ):
                val = state_dict.pop(key )
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config ) if is_panoptic else DetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format )
    encoding = processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    original_outputs = detr(pixel_values )
    outputs = model(pixel_values )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowerCamelCase( unittest.TestCase ):
    def test_input_types( self) -> None:
        """simple docstring"""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input( self) -> None:
        """simple docstring"""
        # one constraint is a strict prefix of another, which is not allowed
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset) # fails here
    def test_example_progression( self) -> None:
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped , completed , reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped , completed , reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped , completed , reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset( self) -> None:
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped , completed , reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped , completed , reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped , completed , reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped , completed , reset = dc.update(5)
        self.assertTrue(dc.completed) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped , completed , reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped , completed , reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped , completed , reset = dc.update(5)
        self.assertTrue(dc.completed) # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
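# Small standalone sketch (added; requires torch to be installed): the same update API,
# outside unittest, walking one branch of the disjunctive trie to completion.
if __name__ == "__main__":
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped , completed , reset = dc.update(token)
    print(completed) # True: [1, 2, 4] satisfies the constraint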
| 89 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE : str = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers( Enum ):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput( BaseOutput ):
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["""dtype"""]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained( cls, pretrained_model_name_or_path = None, subfolder = None, return_unused_kwargs=False, **kwargs, ):
        """simple docstring"""
        config , kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs, )
        scheduler , unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
        if hasattr(scheduler, 'create_state') and getattr(scheduler, 'has_state', False):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def save_pretrained( self, save_directory, push_to_hub = False, **kwargs) -> None:
        """simple docstring"""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
    @property
    def compatibles( self):
        """simple docstring"""
        return self._get_compatibles()
    @classmethod
    def _get_compatibles( cls):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x , shape ) -> jnp.ndarray:
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , dtype=jnp.float32 ) -> jnp.ndarray:
    def alpha_bar(time_step ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
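# Quick sanity sketch (added, not part of the original module): for a tiny cosine
# schedule the betas stay in (0, max_beta], and the last beta is clipped at max_beta
# because alpha_bar(1.0) is exactly zero.
def _demo_cosine_betas() -> None:
    betas = betas_for_alpha_bar(8 )
    assert betas.shape == (8,)
    assert float(betas.max() ) <= 0.999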
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
    @classmethod
    def create( cls, scheduler) -> "CommonSchedulerState":
        """simple docstring"""
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''')
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state , original_samples , noise , timesteps ):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state , original_samples , noise , timesteps ):
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state , sample , noise , timesteps ):
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
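# Consistency sketch (added): with scalars a, b standing in for sqrt_alpha_prod and
# sqrt_one_minus_alpha_prod (so a*a + b*b == 1), the v-prediction defined above
# recovers the clean sample via x0 = a * x_t - b * v.
def _demo_v_prediction() -> None:
    a , b = 0.8 , 0.6 # a*a + b*b == 1
    x0 , eps = 2.0 , -1.0
    x_t = a * x0 + b * eps
    v = a * eps - b * x0
    assert abs((a * x_t - b * v) - x0 ) < 1e-9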
| 89 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self, lowerCamelCase, lowerCamelCase="</s>", lowerCamelCase="<unk>", lowerCamelCase=[], lowerCamelCase = None, **lowerCamelCase, ) -> None:
"""simple docstring"""
_lowercase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCamelCase, unk_token=lowerCamelCase, additional_special_tokens=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
_lowercase : List[str] = vocab_file
_lowercase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase)
@property
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
return self.sp_model.get_piece_size()
def UpperCamelCase ( self) -> Dict[str, int]:
"""simple docstring"""
_lowercase : str = {self.convert_ids_to_tokens(lowerCamelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : str = self.__dict__.copy()
_lowercase : Tuple = None
return state
def __setstate__( self, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs'):
_lowercase : Dict = {}
_lowercase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> List[Any]:
"""simple docstring"""
return self.sp_model.piece_to_id(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Tuple:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
_lowercase : Any = self.sp_model.IdToPiece(lowerCamelCase)
return token
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Tuple = []
_lowercase : Optional[Any] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase) + token
_lowercase : Tuple = []
else:
current_sub_tokens.append(lowerCamelCase)
out_string += self.sp_model.decode(lowerCamelCase)
return out_string.strip()
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_lowercase : Optional[Any] = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, lowerCamelCase)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase, 'wb') as fi:
_lowercase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase)
return (out_vocab_file,)
| 89 |
from __future__ import annotations
def mean(nums ) -> float:
    if not nums:
        raise ValueError('List is empty' )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name : Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    dataset_config_name : Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}, )
    max_seq_length : int = field(
        default=10_24, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    overwrite_cache : bool = field(
        default=False, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    pad_to_max_length : bool = field(
        default=False, metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        }, )
    max_train_samples : Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        }, )
    max_eval_samples : Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        }, )
    max_predict_samples : Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        }, )
    train_file : Optional[str] = field(
        default=None, metadata={"""help""": """A csv or a json file containing the training data."""} )
    validation_file : Optional[str] = field(
        default=None, metadata={"""help""": """A csv or a json file containing the validation data."""} )
    test_file : Optional[str] = field(default=None, metadata={"""help""": """A csv or a json file containing the test data."""} )
    def __post_init__( self) -> None:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
            train_extension = self.train_file.split('.')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path : str = field(
        default=None, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name : Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name : Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir : Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
    use_fast_tokenizer : bool = field(
        default=True, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
    model_revision : str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
    use_auth_token : bool = field(
        default=False, metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        }, )
def main() -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowercase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowercase : Optional[Any] = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a dataset name.
if training_args.do_predict:
if data_args.test_file is not None:
_lowercase : Tuple = data_args.train_file.split('.' )[-1]
_lowercase : int = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_lowercase : Any = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_lowercase : str = load_dataset('csv' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowercase : Optional[int] = load_dataset('json' , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowercase : Optional[Any] = raw_datasets['train'].features['label'].names
_lowercase : Any = len(lowerCamelCase_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_lowercase : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCamelCase_ , )
_lowercase : Tuple = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowercase : int = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowercase : str = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowercase : List[Any] = {'Refused': 0, 'Entailed': 1}
_lowercase : Union[str, Any] = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_lowercase : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCamelCase_ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCamelCase_ ):
_lowercase : int = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_lowercase : Any = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
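# Illustrative only (hypothetical value, not taken from the dataset): a
# `table_text` string such as "city#year\nTokyo#2020\nRio#2016" is split on
# '\n' into rows and on '#' into cells, yielding a DataFrame with columns
# ["city", "year"] and two data rows.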
_lowercase : List[Any] = examples['statement']
_lowercase : Optional[Any] = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_lowercase : Union[str, Any] = tokenizer(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ )
_lowercase : Any = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
_lowercase : str = raw_datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowercase : Any = raw_datasets['train']
if data_args.max_train_samples is not None:
_lowercase : str = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowercase : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowercase : List[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_lowercase : Optional[int] = raw_datasets['test']
if data_args.max_predict_samples is not None:
_lowercase : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCamelCase_ ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCamelCase_ ):
_lowercase : Dict = p.predictions[0] if isinstance(p.predictions , lowerCamelCase_ ) else p.predictions
_lowercase : Tuple = np.argmax(lowerCamelCase_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowercase : Any = default_data_collator
elif training_args.fpaa:
_lowercase : str = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 )
else:
_lowercase : Optional[Any] = None
# Initialize our Trainer
_lowercase : List[str] = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase_ , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
_lowercase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
_lowercase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowercase : Optional[Any] = last_checkpoint
_lowercase : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
_lowercase : List[Any] = train_result.metrics
_lowercase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
_lowercase : int = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCamelCase_ )
trainer.save_metrics('train' , lowerCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowercase : Tuple = trainer.evaluate(eval_dataset=lowerCamelCase_ )
_lowercase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
_lowercase : Optional[int] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('eval' , lowerCamelCase_ )
trainer.save_metrics('eval' , lowerCamelCase_ )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` column because it contains -1 and Trainer won't like that.
_lowercase : Any = predict_dataset.remove_columns('label' )
_lowercase : Optional[Any] = trainer.predict(lowerCamelCase_ , metric_key_prefix='predict' ).predictions
_lowercase : Union[str, Any] = np.argmax(lowerCamelCase_ , axis=1 )
_lowercase : Dict = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(lowerCamelCase_ , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(lowerCamelCase_ ):
_lowercase : List[str] = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
_lowercase : str = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
# For xla_spawn (TPUs)
main()
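# Note: `xla_spawn.py` invokes this wrapper once per TPU core, passing the
# process index as its single (unused) argument.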
if __name__ == "__main__":
main()
| 89 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase_( ) -> List[Any]:
_lowercase : int = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
_lowercase : Optional[Any] = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
_lowercase : Any = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
_lowercase : Optional[int] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
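# Usage sketch (assuming the package wires this function up as the
# `transformers-cli` console entry point):
#   transformers-cli env                          # report the local environment
#   transformers-cli download bert-base-uncased   # fetch a checkpoint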
| 89 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
@dataclass
class _lowerCamelCase:
lowercase_ : str
lowercase_ : List[str]
lowercase_ : Optional[List[str]]
@dataclass
class _lowerCamelCase:
lowercase_ : List[int]
lowercase_ : List[int]
lowercase_ : Optional[List[int]] = None
lowercase_ : Optional[List[int]] = None
class _lowerCamelCase( _a ):
lowercase_ : List[str] = """train"""
lowercase_ : List[Any] = """dev"""
lowercase_ : List[str] = """test"""
class _lowerCamelCase:
@staticmethod
def UpperCamelCase ( lowerCamelCase, lowerCamelCase) -> List[InputExample]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def UpperCamelCase ( lowerCamelCase) -> List[str]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def UpperCamelCase ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase="[CLS]", lowerCamelCase=1, lowerCamelCase="[SEP]", lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=0, lowerCamelCase=0, lowerCamelCase=-1_00, lowerCamelCase=0, lowerCamelCase=True, ) -> List[InputFeatures]:
"""simple docstring"""
_lowercase : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase)}
_lowercase : Union[str, Any] = []
for ex_index, example in enumerate(lowerCamelCase):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d of %d', lowerCamelCase, len(lowerCamelCase))
_lowercase : str = []
_lowercase : Tuple = []
for word, label in zip(example.words, example.labels):
_lowercase : Dict = tokenizer.tokenize(lowerCamelCase)
# bert-base-multilingual-cased sometimes outputs nothing ([]) when tokenizing just a space.
if len(lowerCamelCase) > 0:
tokens.extend(lowerCamelCase)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(lowerCamelCase) - 1))
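# For illustration (hypothetical tokenization): a word split into sub-tokens
# ['wash', '##ing'] contributes [label_map[label], pad_token_label_id], so only
# the first sub-token of each word is scored by the loss.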
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_lowercase : int = tokenizer.num_special_tokens_to_add()
if len(lowerCamelCase) > max_seq_length - special_tokens_count:
_lowercase : Optional[int] = tokens[: (max_seq_length - special_tokens_count)]
_lowercase : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# RoBERTa uses an extra separator between pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_lowercase : List[str] = [sequence_a_segment_id] * len(lowerCamelCase)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_lowercase : Union[str, Any] = [cls_token] + tokens
_lowercase : Any = [pad_token_label_id] + label_ids
_lowercase : List[str] = [cls_token_segment_id] + segment_ids
_lowercase : Dict = tokenizer.convert_tokens_to_ids(lowerCamelCase)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_lowercase : Optional[int] = [1 if mask_padding_with_zero else 0] * len(lowerCamelCase)
# Zero-pad up to the sequence length.
_lowercase : Any = max_seq_length - len(lowerCamelCase)
if pad_on_left:
_lowercase : Dict = ([pad_token] * padding_length) + input_ids
_lowercase : Tuple = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_lowercase : List[Any] = ([pad_token_segment_id] * padding_length) + segment_ids
_lowercase : Dict = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
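# Worked example (hypothetical sizes): with max_seq_length=8 and 5 tokens so
# far, padding_length is 3, so input_ids gains three pad_token ids, input_mask
# three 0s (when mask_padding_with_zero is set), and label_ids three
# pad_token_label_id entries.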
assert len(lowerCamelCase) == max_seq_length
assert len(lowerCamelCase) == max_seq_length
assert len(lowerCamelCase) == max_seq_length
assert len(lowerCamelCase) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***')
logger.info('guid: %s', example.guid)
logger.info('tokens: %s', ' '.join([str(lowerCamelCase) for x in tokens]))
logger.info('input_ids: %s', ' '.join([str(lowerCamelCase) for x in input_ids]))
logger.info('input_mask: %s', ' '.join([str(lowerCamelCase) for x in input_mask]))
logger.info('segment_ids: %s', ' '.join([str(lowerCamelCase) for x in segment_ids]))
logger.info('label_ids: %s', ' '.join([str(lowerCamelCase) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
_lowercase : Optional[Any] = None
features.append(
InputFeatures(
input_ids=lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, label_ids=lowerCamelCase))
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class _lowerCamelCase( _a ):
lowercase_ : List[InputFeatures]
lowercase_ : int = nn.CrossEntropyLoss().ignore_index
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase=False, lowerCamelCase = Split.train, ) -> int:
"""simple docstring"""
_lowercase : List[str] = os.path.join(
lowerCamelCase, 'cached_{}_{}_{}'.format(mode.value, tokenizer.__class__.__name__, str(lowerCamelCase)), )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowercase : List[Any] = cached_features_file + '.lock'
with FileLock(lowerCamelCase):
if os.path.exists(lowerCamelCase) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''')
_lowercase : str = torch.load(lowerCamelCase)
else:
logger.info(F'''Creating features from dataset file at {data_dir}''')
_lowercase : Optional[int] = token_classification_task.read_examples_from_file(lowerCamelCase, lowerCamelCase)
# TODO clean up all this to leverage built-in features of tokenizers
_lowercase : Optional[int] = token_classification_task.convert_examples_to_features(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=lowerCamelCase, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
logger.info(F'''Saving features into cached file {cached_features_file}''')
torch.save(self.features, lowerCamelCase)
def __len__( self) -> int:
"""simple docstring"""
return len(self.features)
def __getitem__( self, lowerCamelCase) -> InputFeatures:
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
class _lowerCamelCase:
lowercase_ : List[InputFeatures]
lowercase_ : int = -1_00
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase=False, lowerCamelCase = Split.train, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[str] = token_classification_task.read_examples_from_file(lowerCamelCase, lowerCamelCase)
# TODO clean up all this to leverage built-in features of tokenizers
_lowercase : int = token_classification_task.convert_examples_to_features(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=lowerCamelCase, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
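# The generator yields (features, labels) pairs in exactly the structure that
# tf.data expects, so `from_generator` below can stream examples without
# materializing all tensors up front.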
if "token_type_ids" not in tokenizer.model_input_names:
_lowercase : Optional[Any] = tf.data.Dataset.from_generator(
lowerCamelCase, ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa), (
{'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None])},
tf.TensorShape([None]),
), )
else:
_lowercase : Dict = tf.data.Dataset.from_generator(
lowerCamelCase, ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa), (
{
'input_ids': tf.TensorShape([None]),
'attention_mask': tf.TensorShape([None]),
'token_type_ids': tf.TensorShape([None]),
},
tf.TensorShape([None]),
), )
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
return self.dataset
def __len__( self) -> Any:
"""simple docstring"""
return len(self.features)
def __getitem__( self, lowerCamelCase) -> InputFeatures:
"""simple docstring"""
return self.features[i]
| 89 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Optional[int] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase ( self, lowerCamelCase=0) -> str:
"""simple docstring"""
_lowercase : Optional[int] = np.random.RandomState(lowerCamelCase)
_lowercase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
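# These dummy inputs keep the smoke tests fast and deterministic: a fixed
# RandomState seed pins the generator, and only 2 inference steps are run.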
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : Tuple = pipe(**lowerCamelCase).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Any = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_dummy_inputs()
_lowercase : Any = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
_lowercase : int = self.get_dummy_inputs()
_lowercase : Union[str, Any] = 3 * [inputs.pop('prompt')]
_lowercase : Union[str, Any] = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Tuple = text_inputs['input_ids']
_lowercase : Any = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
_lowercase : List[Any] = prompt_embeds
# forward
_lowercase : Union[str, Any] = pipe(**lowerCamelCase)
_lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
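# The intent of this check is that passing precomputed `prompt_embeds`
# reproduces the image obtained from the raw prompt string, up to a small
# numerical tolerance.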
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = self.get_dummy_inputs()
_lowercase : Any = 3 * ['this is a negative prompt']
_lowercase : str = negative_prompt
_lowercase : Optional[int] = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : str = output.images[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : str = 3 * [inputs.pop('prompt')]
_lowercase : Optional[int] = []
for p in [prompt, negative_prompt]:
_lowercase : Tuple = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
_lowercase : Dict = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
_lowercase , _lowercase : str = embeds
# forward
_lowercase : Dict = pipe(**lowerCamelCase)
_lowercase : Tuple = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : int = ort.SessionOptions()
_lowercase : str = False
return options
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0)
_lowercase : Union[str, Any] = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : str = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = 'open neural network exchange'
_lowercase : List[Any] = np.random.RandomState(0)
_lowercase : Optional[Any] = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = 'open neural network exchange'
_lowercase : str = np.random.RandomState(0)
_lowercase : Dict = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np')
_lowercase : Optional[Any] = output.images
_lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : List[Any] = 0
def test_callback_fn(lowerCamelCase, lowerCamelCase, lowerCamelCase) -> None:
_lowercase : List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_lowercase : Any = latents[0, -3:, -3:, -1]
_lowercase : Tuple = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_lowercase : List[Any] = latents[0, -3:, -3:, -1]
_lowercase : str = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
_lowercase : Any = False
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = 'Andromeda galaxy in a bottle'
_lowercase : str = np.random.RandomState(0)
pipe(
prompt=lowerCamelCase, num_inference_steps=5, guidance_scale=7.5, generator=lowerCamelCase, callback=lowerCamelCase, callback_steps=1, )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
assert isinstance(lowerCamelCase, lowerCamelCase)
assert pipe.safety_checker is None
_lowercase : Optional[int] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase)
_lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowercase : List[str] = pipe('example prompt', num_inference_steps=2).images[0]
assert image is not None
| 89 | 1 |